264 files changed, 6003 insertions, 2781 deletions
diff --git a/Documentation/CodingGuidelines b/Documentation/CodingGuidelines index c37c43186e..1a7bc4591c 100644 --- a/Documentation/CodingGuidelines +++ b/Documentation/CodingGuidelines @@ -217,6 +217,9 @@ For C programs: . since mid 2017 with 512f41cf, we have been using designated initializers for array (e.g. "int array[10] = { [5] = 2 }"). + . since early 2021 with 765dc168882, we have been using variadic + macros, mostly for printf-like trace and debug macros. + These used to be forbidden, but we have not heard any breakage report, and they are assumed to be safe. diff --git a/Documentation/Makefile b/Documentation/Makefile index ed656db2ae..1eb9192dae 100644 --- a/Documentation/Makefile +++ b/Documentation/Makefile @@ -1,3 +1,6 @@ +# Import tree-wide shared Makefile behavior and libraries +include ../shared.mak + # Guard against environment variables MAN1_TXT = MAN5_TXT = @@ -215,38 +218,6 @@ DEFAULT_EDITOR_SQ = $(subst ','\'',$(DEFAULT_EDITOR)) ASCIIDOC_EXTRA += -a 'git-default-editor=$(DEFAULT_EDITOR_SQ)' endif -QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir -QUIET_SUBDIR1 = - -ifneq ($(findstring $(MAKEFLAGS),w),w) -PRINT_DIR = --no-print-directory -else # "make -w" -NO_SUBDIR = : -endif - -ifneq ($(findstring $(MAKEFLAGS),s),s) -ifndef V - QUIET = @ - QUIET_ASCIIDOC = @echo ' ' ASCIIDOC $@; - QUIET_XMLTO = @echo ' ' XMLTO $@; - QUIET_DB2TEXI = @echo ' ' DB2TEXI $@; - QUIET_MAKEINFO = @echo ' ' MAKEINFO $@; - QUIET_DBLATEX = @echo ' ' DBLATEX $@; - QUIET_XSLTPROC = @echo ' ' XSLTPROC $@; - QUIET_GEN = @echo ' ' GEN $@; - QUIET_STDERR = 2> /dev/null - QUIET_SUBDIR0 = +@subdir= - QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \ - $(MAKE) $(PRINT_DIR) -C $$subdir - - QUIET_LINT_GITLINK = @echo ' ' LINT GITLINK $<; - QUIET_LINT_MANSEC = @echo ' ' LINT MAN SEC $<; - QUIET_LINT_MANEND = @echo ' ' LINT MAN END $<; - - export V -endif -endif - all: html man html: $(DOC_HTML) @@ -463,25 +434,11 @@ quick-install-html: require-htmlrepo print-man1: @for i in $(MAN1_TXT); do echo $$i; done -## Lint: Common -.build: - $(QUIET)mkdir $@ -.build/lint-docs: | .build - $(QUIET)mkdir $@ - ## Lint: gitlink -.build/lint-docs/gitlink: | .build/lint-docs - $(QUIET)mkdir $@ -.build/lint-docs/gitlink/howto: | .build/lint-docs/gitlink - $(QUIET)mkdir $@ -.build/lint-docs/gitlink/config: | .build/lint-docs/gitlink - $(QUIET)mkdir $@ LINT_DOCS_GITLINK = $(patsubst %.txt,.build/lint-docs/gitlink/%.ok,$(HOWTO_TXT) $(DOC_DEP_TXT)) -$(LINT_DOCS_GITLINK): | .build/lint-docs/gitlink -$(LINT_DOCS_GITLINK): | .build/lint-docs/gitlink/howto -$(LINT_DOCS_GITLINK): | .build/lint-docs/gitlink/config $(LINT_DOCS_GITLINK): lint-gitlink.perl $(LINT_DOCS_GITLINK): .build/lint-docs/gitlink/%.ok: %.txt + $(call mkdir_p_parent_template) $(QUIET_LINT_GITLINK)$(PERL_PATH) lint-gitlink.perl \ $< \ $(HOWTO_TXT) $(DOC_DEP_TXT) \ @@ -492,23 +449,18 @@ $(LINT_DOCS_GITLINK): .build/lint-docs/gitlink/%.ok: %.txt lint-docs-gitlink: $(LINT_DOCS_GITLINK) ## Lint: man-end-blurb -.build/lint-docs/man-end-blurb: | .build/lint-docs - $(QUIET)mkdir $@ LINT_DOCS_MAN_END_BLURB = $(patsubst %.txt,.build/lint-docs/man-end-blurb/%.ok,$(MAN_TXT)) -$(LINT_DOCS_MAN_END_BLURB): | .build/lint-docs/man-end-blurb $(LINT_DOCS_MAN_END_BLURB): lint-man-end-blurb.perl $(LINT_DOCS_MAN_END_BLURB): .build/lint-docs/man-end-blurb/%.ok: %.txt + $(call mkdir_p_parent_template) $(QUIET_LINT_MANEND)$(PERL_PATH) lint-man-end-blurb.perl $< >$@ .PHONY: lint-docs-man-end-blurb -lint-docs-man-end-blurb: $(LINT_DOCS_MAN_END_BLURB) ## Lint: 
man-section-order -.build/lint-docs/man-section-order: | .build/lint-docs - $(QUIET)mkdir $@ LINT_DOCS_MAN_SECTION_ORDER = $(patsubst %.txt,.build/lint-docs/man-section-order/%.ok,$(MAN_TXT)) -$(LINT_DOCS_MAN_SECTION_ORDER): | .build/lint-docs/man-section-order $(LINT_DOCS_MAN_SECTION_ORDER): lint-man-section-order.perl $(LINT_DOCS_MAN_SECTION_ORDER): .build/lint-docs/man-section-order/%.ok: %.txt + $(call mkdir_p_parent_template) $(QUIET_LINT_MANSEC)$(PERL_PATH) lint-man-section-order.perl $< >$@ .PHONY: lint-docs-man-section-order lint-docs-man-section-order: $(LINT_DOCS_MAN_SECTION_ORDER) @@ -524,7 +476,4 @@ doc-l10n install-l10n:: $(MAKE) -C po $@ endif -# Delete the target file on error -.DELETE_ON_ERROR: - .PHONY: FORCE diff --git a/Documentation/MyFirstObjectWalk.txt b/Documentation/MyFirstObjectWalk.txt index ca267941f3..8d9e85566e 100644 --- a/Documentation/MyFirstObjectWalk.txt +++ b/Documentation/MyFirstObjectWalk.txt @@ -522,24 +522,25 @@ function shows that the all-object walk is being performed by `traverse_commit_list()` or `traverse_commit_list_filtered()`. Those two functions reside in `list-objects.c`; examining the source shows that, despite the name, these functions traverse all kinds of objects. Let's have a look at -the arguments to `traverse_commit_list_filtered()`, which are a superset of the -arguments to the unfiltered version. +the arguments to `traverse_commit_list()`. -- `struct list_objects_filter_options *filter_options`: This is a struct which - stores a filter-spec as outlined in `Documentation/rev-list-options.txt`. -- `struct rev_info *revs`: This is the `rev_info` used for the walk. +- `struct rev_info *revs`: This is the `rev_info` used for the walk. If + its `filter` member is not `NULL`, then `filter` contains information for + how to filter the object list. - `show_commit_fn show_commit`: A callback which will be used to handle each individual commit object. - `show_object_fn show_object`: A callback which will be used to handle each non-commit object (so each blob, tree, or tag). - `void *show_data`: A context buffer which is passed in turn to `show_commit` and `show_object`. + +In addition, `traverse_commit_list_filtered()` has an additional paramter: + - `struct oidset *omitted`: A linked-list of object IDs which the provided filter caused to be omitted. -It looks like this `traverse_commit_list_filtered()` uses callbacks we provide -instead of needing us to call it repeatedly ourselves. Cool! Let's add the -callbacks first. +It looks like these methods use callbacks we provide instead of needing us +to call it repeatedly ourselves. Cool! Let's add the callbacks first. For the sake of this tutorial, we'll simply keep track of how many of each kind of object we find. At file scope in `builtin/walken.c` add the following @@ -712,20 +713,9 @@ help understand. In our case, that means we omit trees and blobs not directly referenced by `HEAD` or `HEAD`'s history, because we begin the walk with only `HEAD` in the `pending` list.) -First, we'll need to `#include "list-objects-filter-options.h"` and set up the -`struct list_objects_filter_options` at the top of the function. - ----- -static void walken_object_walk(struct rev_info *rev) -{ - struct list_objects_filter_options filter_options = { 0 }; - - ... ----- - For now, we are not going to track the omitted objects, so we'll replace those parameters with `NULL`. For the sake of simplicity, we'll add a simple -build-time branch to use our filter or not. 
Replace the line calling +build-time branch to use our filter or not. Preface the line calling `traverse_commit_list()` with the following, which will remind us which kind of walk we've just performed: @@ -733,19 +723,17 @@ walk we've just performed: if (0) { /* Unfiltered: */ trace_printf(_("Unfiltered object walk.\n")); - traverse_commit_list(rev, walken_show_commit, - walken_show_object, NULL); } else { trace_printf( _("Filtered object walk with filterspec 'tree:1'.\n")); - parse_list_objects_filter(&filter_options, "tree:1"); - - traverse_commit_list_filtered(&filter_options, rev, - walken_show_commit, walken_show_object, NULL, NULL); + CALLOC_ARRAY(rev->filter, 1); + parse_list_objects_filter(rev->filter, "tree:1"); } + traverse_commit_list(rev, walken_show_commit, + walken_show_object, NULL); ---- -`struct list_objects_filter_options` is usually built directly from a command +The `rev->filter` member is usually built directly from a command line argument, so the module provides an easy way to build one from a string. Even though we aren't taking user input right now, we can still build one with a hardcoded string using `parse_list_objects_filter()`. @@ -784,7 +772,7 @@ object: ---- ... - traverse_commit_list_filtered(&filter_options, rev, + traverse_commit_list_filtered(rev, walken_show_commit, walken_show_object, NULL, &omitted); ... diff --git a/Documentation/RelNotes/2.36.0.txt b/Documentation/RelNotes/2.36.0.txt index dcb39fe56c..c4ca105e3c 100644 --- a/Documentation/RelNotes/2.36.0.txt +++ b/Documentation/RelNotes/2.36.0.txt @@ -16,7 +16,13 @@ Backward compatibility warts Note to those who build from the source - * + * Since Git 2.31, our source assumed that the compiler you use to + build Git supports variadic macros, with an easy-to-use escape + hatch to allow compilation without variadic macros with an request + to report that you had to use the escape hatch to the list. + Because we haven't heard from anybody who actually needed to use + the escape hatch, it has been removed, making support of variadic + macros a hard requirement. UI, Workflows & Features @@ -52,6 +58,32 @@ UI, Workflows & Features * The error message given by "git switch HEAD~4" has been clarified to suggest the "--detach" option that is required. + * In sparse-checkouts, files mis-marked as missing from the working tree + could lead to later problems. Such files were hard to discover, and + harder to correct. Automatically detecting and correcting the marking + of such files has been added to avoid these problems. + + * "git cat-file" learns "--batch-command" mode, which is a more + flexible interface than the existing "--batch" or "--batch-check" + modes, to allow different kinds of inquiries made. + + * The level of verbose output from the ort backend during inner merge + has been aligned to that of the recursive backend. + + * "git remote rename A B", depending on the number of remote-tracking + refs involved, takes long time renaming them. The command has been + taught to show progress bar while making the user wait. + + * Bundle file format gets extended to allow a partial bundle, + filtered by similar criteria you would give when making a + partial/lazy clone. + + * A new built-in userdiff driver for kotlin has been added. + + * "git repack" learned a new configuration to disable triggering of + age-old "update-server-info" command, which is rarely useful these + days. + Performance, Internal Implementation, Development Support etc. 
@@ -90,6 +122,42 @@ Performance, Internal Implementation, Development Support etc. * Use designated initializers we started using in mid 2017 in more parts of the codebase that are relatively quiescent. + * Improve failure case behaviour of xdiff library when memory + allocation fails. + + * General clean-up in reftable implementation, including + clarification of the API documentation, tightening the code to + honor documented length limit, etc. + + * Remove the escape hatch we added when we introduced the weather + balloon to use variadic macros unconditionally, to make it official + that we now have a hard dependency on the feature. + + * Makefile refactoring with a bit of suffixes rule stripping to + optimize the runtime overhead. + + * "git stash drop" is reimplemented as an internal call to + reflog_delete() function, instead of invoking "git reflog delete" + via run_command() API. + + * Count string_list items in size_t, not "unsigned int". + + * The single-key interactive operation used by "git add -p" has been + made more robust. + + * Remove unneeded <meta http-equiv=content-type...> from gitweb + output. + + * "git name-rev" learned to use the generation numbers when setting + the lower bound of searching commits used to explain the revision, + when available, instead of committer time. + + * Replace core.fsyncObjectFiles with two new configuration variables, + core.fsync and core.fsyncMethod. + + * Updates to refs traditionally weren't fsync'ed, but we can + configure using core.fsync variable to do so. + Fixes since v2.35 ----------------- @@ -253,6 +321,48 @@ Fixes since v2.35 recorded the last level component of the branch name, which has been corrected. + * "git fetch" can make two separate fetches, but ref updates coming + from them were in two separate ref transactions under "--atomic", + which has been corrected. + + * Check the return value from parse_tree_indirect() to turn segfaults + into calls to die(). + (merge 8d2eaf649a gc/parse-tree-indirect-errors later to maint). + + * Newer version of GPGSM changed its output in a backward + incompatible way to break our code that parses its output. It also + added more processes our tests need to kill when cleaning up. + Adjustments have been made to accommodate these changes. + (merge b0b70d54c4 fs/gpgsm-update later to maint). + + * The untracked cache newly computed weren't written back to the + on-disk index file when there is no other change to the index, + which has been corrected. + + * "git config -h" did not describe the "--type" option correctly. + (merge 5445124fad mf/fix-type-in-config-h later to maint). + + * The way generation number v2 in the commit-graph files are + (not) handled has been corrected. + (merge 6dbf4b8172 ds/commit-graph-gen-v2-fixes later to maint). + + * The method to trigger malloc check used in our tests no longer work + with newer versions of glibc. + (merge baedc59543 ep/test-malloc-check-with-glibc-2.34 later to maint). + + * When "git fetch --recurse-submodules" grabbed submodule commits + that would be needed to recursively check out newly fetched commits + in the superproject, it only paid attention to submodules that are + in the current checkout of the superproject. We now do so for all + submodules that have been run "git submodule init" on. + + * "git rebase $base $non_branch_commit", when $base is an ancestor or + the $non_branch_commit, modified the current branch, which has been + corrected. 
+ + * When "shallow" information is updated, we forgot to update the + in-core equivalent, which has been corrected. + * Other code cleanup, docfix, build fix, etc. (merge cfc5cf428b jc/find-header later to maint). (merge 40e7cfdd46 jh/p4-fix-use-of-process-error-exception later to maint). @@ -277,3 +387,8 @@ Fixes since v2.35 (merge 332acc248d ds/mailmap later to maint). (merge 04bf052eef ab/grep-patterntype later to maint). (merge 6ee36364eb ab/diff-free-more later to maint). + (merge 63a36017fe nj/read-tree-doc-reffix later to maint). + (merge eed36fce38 sm/no-git-in-upstream-of-pipe-in-tests later to maint). + (merge c614beb933 ep/t6423-modernize later to maint). + (merge 57be9c6dee ab/reflog-prep-fix later to maint). + (merge 5327d8982a js/in-place-reverse-in-sequencer later to maint). diff --git a/Documentation/config.txt b/Documentation/config.txt index bf3e512921..f0fb25a371 100644 --- a/Documentation/config.txt +++ b/Documentation/config.txt @@ -503,6 +503,8 @@ include::config/sequencer.txt[] include::config/showbranch.txt[] +include::config/sparse.txt[] + include::config/splitindex.txt[] include::config/ssh.txt[] diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt index c04f62a54a..9da3e5d88f 100644 --- a/Documentation/config/core.txt +++ b/Documentation/config/core.txt @@ -547,13 +547,64 @@ core.whitespace:: is relevant for `indent-with-non-tab` and when Git fixes `tab-in-indent` errors. The default tab width is 8. Allowed values are 1 to 63. +core.fsync:: + A comma-separated list of components of the repository that + should be hardened via the core.fsyncMethod when created or + modified. You can disable hardening of any component by + prefixing it with a '-'. Items that are not hardened may be + lost in the event of an unclean system shutdown. Unless you + have special requirements, it is recommended that you leave + this option empty or pick one of `committed`, `added`, + or `all`. ++ +When this configuration is encountered, the set of components starts with +the platform default value, disabled components are removed, and additional +components are added. `none` resets the state so that the platform default +is ignored. ++ +The empty string resets the fsync configuration to the platform +default. The default on most platforms is equivalent to +`core.fsync=committed,-loose-object`, which has good performance, +but risks losing recent work in the event of an unclean system shutdown. ++ +* `none` clears the set of fsynced components. +* `loose-object` hardens objects added to the repo in loose-object form. +* `pack` hardens objects added to the repo in packfile form. +* `pack-metadata` hardens packfile bitmaps and indexes. +* `commit-graph` hardens the commit graph file. +* `index` hardens the index when it is modified. +* `objects` is an aggregate option that is equivalent to + `loose-object,pack`. +* `reference` hardens references modified in the repo. +* `derived-metadata` is an aggregate option that is equivalent to + `pack-metadata,commit-graph`. +* `committed` is an aggregate option that is currently equivalent to + `objects`. This mode sacrifices some performance to ensure that work + that is committed to the repository with `git commit` or similar commands + is hardened. +* `added` is an aggregate option that is currently equivalent to + `committed,index`. This mode sacrifices additional performance to + ensure that the results of commands like `git add` and similar operations + are hardened. 
+* `all` is an aggregate option that syncs all individual components above. + +core.fsyncMethod:: + A value indicating the strategy Git will use to harden repository data + using fsync and related primitives. ++ +* `fsync` uses the fsync() system call or platform equivalents. +* `writeout-only` issues pagecache writeback requests, but depending on the + filesystem and storage hardware, data added to the repository may not be + durable in the event of a system crash. This is the default mode on macOS. + core.fsyncObjectFiles:: This boolean will enable 'fsync()' when writing object files. + This setting is deprecated. Use core.fsync instead. + -This is a total waste of time and effort on a filesystem that orders -data writes properly, but can be useful for filesystems that do not use -journalling (traditional UNIX filesystems) or that only journal metadata -and not file contents (OS X's HFS+, or Linux ext3 with "data=writeback"). +This setting affects data added to the Git repository in loose-object +form. When set to true, Git will issue an fsync or similar system call +to flush caches so that loose-objects remain consistent in the face +of a unclean system shutdown. core.preloadIndex:: Enable parallel index preload for operations like 'git diff' diff --git a/Documentation/config/repack.txt b/Documentation/config/repack.txt index 9c413e177e..41ac6953c8 100644 --- a/Documentation/config/repack.txt +++ b/Documentation/config/repack.txt @@ -25,3 +25,8 @@ repack.writeBitmaps:: space and extra time spent on the initial repack. This has no effect if multiple packfiles are created. Defaults to true on bare repos, false otherwise. + +repack.updateServerInfo:: + If set to false, linkgit:git-repack[1] will not run + linkgit:git-update-server-info[1]. Defaults to true. Can be overridden + when true by the `-n` option of linkgit:git-repack[1]. diff --git a/Documentation/config/sparse.txt b/Documentation/config/sparse.txt new file mode 100644 index 0000000000..aff49a8d3a --- /dev/null +++ b/Documentation/config/sparse.txt @@ -0,0 +1,27 @@ +sparse.expectFilesOutsideOfPatterns:: + Typically with sparse checkouts, files not matching any + sparsity patterns are marked with a SKIP_WORKTREE bit in the + index and are missing from the working tree. Accordingly, Git + will ordinarily check whether files with the SKIP_WORKTREE bit + are in fact present in the working tree contrary to + expectations. If Git finds any, it marks those paths as + present by clearing the relevant SKIP_WORKTREE bits. This + option can be used to tell Git that such + present-despite-skipped files are expected and to stop + checking for them. ++ +The default is `false`, which allows Git to automatically recover +from the list of files in the index and working tree falling out of +sync. ++ +Set this to `true` if you are in a setup where some external factor +relieves Git of the responsibility for maintaining the consistency +between the presence of working tree files and sparsity patterns. For +example, if you have a Git-aware virtual file system that has a robust +mechanism for keeping the working tree and the sparsity patterns up to +date based on access patterns. ++ +Regardless of this setting, Git does not check for +present-despite-skipped files unless sparse checkout is enabled, so +this config option has no effect unless `core.sparseCheckout` is +`true`. 
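The core.fsync documentation added above describes purely set-based semantics: the set of hardened components starts from the platform default, bare names add components, a leading '-' removes them, `none` clears the set, and aggregate names such as `objects` or `committed` expand to several components at once. Below is a minimal sketch of that merging logic in C; the flag names, the parse_fsync_components() helper, and the default mask are illustrative assumptions and are not taken from Git's source.

----
#include <stdio.h>
#include <string.h>

/* Illustrative component bits; these identifiers are not Git's. */
enum fsync_component {
	FSYNC_LOOSE_OBJECT  = 1 << 0,
	FSYNC_PACK          = 1 << 1,
	FSYNC_PACK_METADATA = 1 << 2,
	FSYNC_COMMIT_GRAPH  = 1 << 3,
	FSYNC_INDEX         = 1 << 4,
	FSYNC_REFERENCE     = 1 << 5,
};

/* Aggregates as described in the core.fsync documentation above. */
#define FSYNC_OBJECTS   (FSYNC_LOOSE_OBJECT | FSYNC_PACK)
#define FSYNC_DERIVED   (FSYNC_PACK_METADATA | FSYNC_COMMIT_GRAPH)
#define FSYNC_COMMITTED (FSYNC_OBJECTS)
#define FSYNC_ADDED     (FSYNC_COMMITTED | FSYNC_INDEX)
#define FSYNC_ALL       (FSYNC_ADDED | FSYNC_DERIVED | FSYNC_REFERENCE)

/* The docs say the default is equivalent to "committed,-loose-object". */
#define FSYNC_PLATFORM_DEFAULT (FSYNC_COMMITTED & ~FSYNC_LOOSE_OBJECT)

static unsigned lookup(const char *name)
{
	static const struct { const char *name; unsigned bits; } map[] = {
		{ "loose-object", FSYNC_LOOSE_OBJECT },
		{ "pack", FSYNC_PACK },
		{ "pack-metadata", FSYNC_PACK_METADATA },
		{ "commit-graph", FSYNC_COMMIT_GRAPH },
		{ "index", FSYNC_INDEX },
		{ "reference", FSYNC_REFERENCE },
		{ "objects", FSYNC_OBJECTS },
		{ "derived-metadata", FSYNC_DERIVED },
		{ "committed", FSYNC_COMMITTED },
		{ "added", FSYNC_ADDED },
		{ "all", FSYNC_ALL },
	};
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (!strcmp(name, map[i].name))
			return map[i].bits;
	return 0; /* a real parser would report unknown component names */
}

/* Start from the platform default; '-' removes, names add, "none" clears. */
static unsigned parse_fsync_components(char *value)
{
	unsigned set = FSYNC_PLATFORM_DEFAULT;
	char *tok;

	if (!*value)
		return FSYNC_PLATFORM_DEFAULT; /* empty string keeps the default */

	for (tok = strtok(value, ","); tok; tok = strtok(NULL, ",")) {
		if (!strcmp(tok, "none"))
			set = 0;
		else if (*tok == '-')
			set &= ~lookup(tok + 1);
		else
			set |= lookup(tok);
	}
	return set;
}

int main(void)
{
	char value[] = "committed,-loose-object,index";

	printf("fsync mask: 0x%x\n", parse_fsync_components(value));
	return 0;
}
----

For example, `committed,-loose-object,index` starts from the platform default, expands `committed`, drops `loose-object` again, and ends up hardening packfiles and the index.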
diff --git a/Documentation/fetch-options.txt b/Documentation/fetch-options.txt index f903683189..6cdd9d43c5 100644 --- a/Documentation/fetch-options.txt +++ b/Documentation/fetch-options.txt @@ -186,15 +186,23 @@ endif::git-pull[] ifndef::git-pull[] --recurse-submodules[=yes|on-demand|no]:: This option controls if and under what conditions new commits of - populated submodules should be fetched too. It can be used as a - boolean option to completely disable recursion when set to 'no' or to - unconditionally recurse into all populated submodules when set to - 'yes', which is the default when this option is used without any - value. Use 'on-demand' to only recurse into a populated submodule - when the superproject retrieves a commit that updates the submodule's - reference to a commit that isn't already in the local submodule - clone. By default, 'on-demand' is used, unless - `fetch.recurseSubmodules` is set (see linkgit:git-config[1]). + submodules should be fetched too. When recursing through submodules, + `git fetch` always attempts to fetch "changed" submodules, that is, a + submodule that has commits that are referenced by a newly fetched + superproject commit but are missing in the local submodule clone. A + changed submodule can be fetched as long as it is present locally e.g. + in `$GIT_DIR/modules/` (see linkgit:gitsubmodules[7]); if the upstream + adds a new submodule, that submodule cannot be fetched until it is + cloned e.g. by `git submodule update`. ++ +When set to 'on-demand', only changed submodules are fetched. When set +to 'yes', all populated submodules are fetched and submodules that are +both unpopulated and changed are fetched. When set to 'no', submodules +are never fetched. ++ +When unspecified, this uses the value of `fetch.recurseSubmodules` if it +is set (see linkgit:git-config[1]), defaulting to 'on-demand' if unset. +When this option is used without any value, it defaults to 'yes'. endif::git-pull[] -j:: diff --git a/Documentation/git-bundle.txt b/Documentation/git-bundle.txt index 72ab813905..ac4c4352aa 100644 --- a/Documentation/git-bundle.txt +++ b/Documentation/git-bundle.txt @@ -75,8 +75,11 @@ verify <file>:: cleanly to the current repository. This includes checks on the bundle format itself as well as checking that the prerequisite commits exist and are fully linked in the current repository. - 'git bundle' prints a list of missing commits, if any, and exits - with a non-zero status. + Information about additional capabilities, such as "object filter", + is printed. See "Capabilities" in link:technical/bundle-format.html + for more information. Finally, 'git bundle' prints a list of + missing commits, if any. The exit code is zero for success, but + will be nonzero if the bundle file is invalid. list-heads <file>:: Lists the references defined in the bundle. If followed by a diff --git a/Documentation/git-cat-file.txt b/Documentation/git-cat-file.txt index bef76f4dd0..70c5b4f12d 100644 --- a/Documentation/git-cat-file.txt +++ b/Documentation/git-cat-file.txt @@ -96,6 +96,33 @@ OPTIONS need to specify the path, separated by whitespace. See the section `BATCH OUTPUT` below for details. +--batch-command:: +--batch-command=<format>:: + Enter a command mode that reads commands and arguments from stdin. May + only be combined with `--buffer`, `--textconv` or `--filters`. In the + case of `--textconv` or `--filters`, the input lines also need to specify + the path, separated by whitespace. See the section `BATCH OUTPUT` below + for details. 
++ +`--batch-command` recognizes the following commands: ++ +-- +contents <object>:: + Print object contents for object reference `<object>`. This corresponds to + the output of `--batch`. + +info <object>:: + Print object info for object reference `<object>`. This corresponds to the + output of `--batch-check`. + +flush:: + Used with `--buffer` to execute all preceding commands that were issued + since the beginning or since the last flush was issued. When `--buffer` + is used, no output will come until a `flush` is issued. When `--buffer` + is not used, commands are flushed each time without issuing `flush`. +-- ++ + --batch-all-objects:: Instead of reading a list of objects on stdin, perform the requested batch operation on all objects in the repository and @@ -110,7 +137,7 @@ OPTIONS that a process can interactively read and write from `cat-file`. With this option, the output uses normal stdio buffering; this is much more efficient when invoking - `--batch-check` on a large number of objects. + `--batch-check` or `--batch-command` on a large number of objects. --unordered:: When `--batch-all-objects` is in use, visit objects in an @@ -202,6 +229,13 @@ from stdin, one per line, and print information about them. By default, the whole line is considered as an object, as if it were fed to linkgit:git-rev-parse[1]. +When `--batch-command` is given, `cat-file` will read commands from stdin, +one per line, and print information based on the command given. With +`--batch-command`, the `info` command followed by an object will print +information about the object the same way `--batch-check` would, and the +`contents` command followed by an object prints contents in the same way +`--batch` would. + You can specify the information shown for each object by using a custom `<format>`. The `<format>` is copied literally to stdout for each object, with placeholders of the form `%(atom)` expanded, followed by a @@ -237,9 +271,9 @@ newline. The available atoms are: If no format is specified, the default format is `%(objectname) %(objecttype) %(objectsize)`. -If `--batch` is specified, the object information is followed by the -object contents (consisting of `%(objectsize)` bytes), followed by a -newline. +If `--batch` is specified, or if `--batch-command` is used with the `contents` +command, the object information is followed by the object contents (consisting +of `%(objectsize)` bytes), followed by a newline. For example, `--batch` without a custom format would produce: diff --git a/Documentation/git-fetch.txt b/Documentation/git-fetch.txt index 550c16ca61..e9d364669a 100644 --- a/Documentation/git-fetch.txt +++ b/Documentation/git-fetch.txt @@ -287,12 +287,10 @@ include::transfer-data-leaks.txt[] BUGS ---- -Using --recurse-submodules can only fetch new commits in already checked -out submodules right now. When e.g. upstream added a new submodule in the -just fetched commits of the superproject the submodule itself cannot be -fetched, making it impossible to check out that submodule later without -having to do a fetch again. This is expected to be fixed in a future Git -version. +Using --recurse-submodules can only fetch new commits in submodules that are +present locally e.g. in `$GIT_DIR/modules/`. If the upstream adds a new +submodule, that submodule cannot be fetched until it is cloned e.g. by `git +submodule update`. This is expected to be fixed in a future Git version. 
SEE ALSO -------- diff --git a/Documentation/git-help.txt b/Documentation/git-help.txt index 44ea63cc6d..239c68db45 100644 --- a/Documentation/git-help.txt +++ b/Documentation/git-help.txt @@ -8,8 +8,8 @@ git-help - Display help information about Git SYNOPSIS -------- [verse] -'git help' [-a|--all [--[no-]verbose]] - [[-i|--info] [-m|--man] [-w|--web]] [<command>|<guide>] +'git help' [-a|--all] [--[no-]verbose] [--[no-]external-commands] [--[no-]aliases] +'git help' [[-i|--info] [-m|--man] [-w|--web]] [<command>|<guide>] 'git help' [-g|--guides] 'git help' [-c|--config] @@ -46,8 +46,15 @@ OPTIONS ------- -a:: --all:: - Prints all the available commands on the standard output. This - option overrides any given command or guide name. + Prints all the available commands on the standard output. + +--no-external-commands:: + When used with `--all`, exclude the listing of external "git-*" + commands found in the `$PATH`. + +--no-aliases:: + When used with `--all`, exclude the listing of configured + aliases. --verbose:: When used with `--all` print description for all recognized diff --git a/Documentation/git-index-pack.txt b/Documentation/git-index-pack.txt index 1f1e359225..4e71c256ec 100644 --- a/Documentation/git-index-pack.txt +++ b/Documentation/git-index-pack.txt @@ -122,6 +122,14 @@ This option cannot be used with --stdin. + include::object-format-disclaimer.txt[] +--promisor[=<message>]:: + Before committing the pack-index, create a .promisor file for this + pack. Particularly helpful when writing a promisor pack with --fix-thin + since the name of the pack is not final until the pack has been fully + written. If a `<message>` is provided, then that content will be + written to the .promisor file for future reference. See + link:technical/partial-clone.html[partial clone] for more information. + NOTES ----- diff --git a/Documentation/git-maintenance.txt b/Documentation/git-maintenance.txt index e2cfb68ab5..e56bad28c6 100644 --- a/Documentation/git-maintenance.txt +++ b/Documentation/git-maintenance.txt @@ -10,6 +10,8 @@ SYNOPSIS -------- [verse] 'git maintenance' run [<options>] +'git maintenance' start [--scheduler=<scheduler>] +'git maintenance' (stop|register|unregister) DESCRIPTION @@ -29,6 +31,24 @@ Git repository. SUBCOMMANDS ----------- +run:: + Run one or more maintenance tasks. If one or more `--task` options + are specified, then those tasks are run in that order. Otherwise, + the tasks are determined by which `maintenance.<task>.enabled` + config options are true. By default, only `maintenance.gc.enabled` + is true. + +start:: + Start running maintenance on the current repository. This performs + the same config updates as the `register` subcommand, then updates + the background scheduler to run `git maintenance run --scheduled` + on an hourly basis. + +stop:: + Halt the background maintenance schedule. The current repository + is not removed from the list of maintained repositories, in case + the background maintenance is restarted later. + register:: Initialize Git config values so any scheduled maintenance will start running on this repository. This adds the repository to the @@ -55,24 +75,6 @@ task: setting `maintenance.auto = false` in the current repository. This config setting will remain after a `git maintenance unregister` command. -run:: - Run one or more maintenance tasks. If one or more `--task` options - are specified, then those tasks are run in that order. Otherwise, - the tasks are determined by which `maintenance.<task>.enabled` - config options are true. 
By default, only `maintenance.gc.enabled` - is true. - -start:: - Start running maintenance on the current repository. This performs - the same config updates as the `register` subcommand, then updates - the background scheduler to run `git maintenance run --scheduled` - on an hourly basis. - -stop:: - Halt the background maintenance schedule. The current repository - is not removed from the list of maintained repositories, in case - the background maintenance is restarted later. - unregister:: Remove the current repository from background maintenance. This only removes the repository from the configured list. It does not diff --git a/Documentation/git-read-tree.txt b/Documentation/git-read-tree.txt index 8c3aceb832..a5356a230b 100644 --- a/Documentation/git-read-tree.txt +++ b/Documentation/git-read-tree.txt @@ -375,9 +375,14 @@ have finished your work-in-progress), attempt the merge again. SPARSE CHECKOUT --------------- +Note: The `update-index` and `read-tree` primitives for supporting the +skip-worktree bit predated the introduction of +linkgit:git-sparse-checkout[1]. Users are encouraged to use +`sparse-checkout` in preference to these low-level primitives. + "Sparse checkout" allows populating the working directory sparsely. -It uses the skip-worktree bit (see linkgit:git-update-index[1]) to tell -Git whether a file in the working directory is worth looking at. +It uses the skip-worktree bit (see linkgit:git-update-index[1]) to +tell Git whether a file in the working directory is worth looking at. 'git read-tree' and other merge-based commands ('git merge', 'git checkout'...) can help maintaining the skip-worktree bitmap and working @@ -385,7 +390,8 @@ directory update. `$GIT_DIR/info/sparse-checkout` is used to define the skip-worktree reference bitmap. When 'git read-tree' needs to update the working directory, it resets the skip-worktree bit in the index based on this file, which uses the same syntax as .gitignore files. -If an entry matches a pattern in this file, skip-worktree will not be +If an entry matches a pattern in this file, or the entry corresponds to +a file present in the working tree, then skip-worktree will not be set on that entry. Otherwise, skip-worktree will be set. Then it compares the new skip-worktree value with the previous one. If @@ -420,8 +426,8 @@ support. SEE ALSO -------- -linkgit:git-write-tree[1]; linkgit:git-ls-files[1]; -linkgit:gitignore[5]; linkgit:git-sparse-checkout[1]; +linkgit:git-write-tree[1], linkgit:git-ls-files[1], +linkgit:gitignore[5], linkgit:git-sparse-checkout[1] GIT --- diff --git a/Documentation/git-remote.txt b/Documentation/git-remote.txt index 2bebc32566..cde9614e36 100644 --- a/Documentation/git-remote.txt +++ b/Documentation/git-remote.txt @@ -11,7 +11,7 @@ SYNOPSIS [verse] 'git remote' [-v | --verbose] 'git remote add' [-t <branch>] [-m <master>] [-f] [--[no-]tags] [--mirror=(fetch|push)] <name> <URL> -'git remote rename' <old> <new> +'git remote rename' [--[no-]progress] <old> <new> 'git remote remove' <name> 'git remote set-head' <name> (-a | --auto | -d | --delete | <branch>) 'git remote set-branches' [--add] <name> <branch>... 
diff --git a/Documentation/git-sparse-checkout.txt b/Documentation/git-sparse-checkout.txt index 94dad137b9..88e55f432f 100644 --- a/Documentation/git-sparse-checkout.txt +++ b/Documentation/git-sparse-checkout.txt @@ -3,9 +3,7 @@ git-sparse-checkout(1) NAME ---- -git-sparse-checkout - Initialize and modify the sparse-checkout -configuration, which reduces the checkout to a set of paths -given by a list of patterns. +git-sparse-checkout - Reduce your working tree to a subset of tracked files SYNOPSIS @@ -17,8 +15,20 @@ SYNOPSIS DESCRIPTION ----------- -Initialize and modify the sparse-checkout configuration, which reduces -the checkout to a set of paths given by a list of patterns. +This command is used to create sparse checkouts, which means that it +changes the working tree from having all tracked files present, to only +have a subset of them. It can also switch which subset of files are +present, or undo and go back to having all tracked files present in the +working copy. + +The subset of files is chosen by providing a list of directories in +cone mode (which is recommended), or by providing a list of patterns +in non-cone mode. + +When in a sparse-checkout, other Git commands behave a bit differently. +For example, switching branches will not update paths outside the +sparse-checkout directories/patterns, and `git commit -a` will not record +paths outside the sparse-checkout directories/patterns as deleted. THIS COMMAND IS EXPERIMENTAL. ITS BEHAVIOR, AND THE BEHAVIOR OF OTHER COMMANDS IN THE PRESENCE OF SPARSE-CHECKOUTS, WILL LIKELY CHANGE IN @@ -28,7 +38,7 @@ THE FUTURE. COMMANDS -------- 'list':: - Describe the patterns in the sparse-checkout file. + Describe the directories or patterns in the sparse-checkout file. 'set':: Enable the necessary sparse-checkout config settings @@ -46,20 +56,26 @@ the 'set' subcommand are stored in the worktree-specific sparse-checkout file. See linkgit:git-worktree[1] and the documentation of `extensions.worktreeConfig` in linkgit:git-config[1] for more details. + -When the `--stdin` option is provided, the patterns are read from -standard in as a newline-delimited list instead of from the arguments. +When the `--stdin` option is provided, the directories or patterns are +read from standard in as a newline-delimited list instead of from the +arguments. + When `--cone` is passed or `core.sparseCheckoutCone` is enabled, the -input list is considered a list of directories instead of -sparse-checkout patterns. This allows for better performance with a -limited set of patterns (see 'CONE PATTERN SET' below). Note that the -set command will write patterns to the sparse-checkout file to include -all files contained in those directories (recursively) as well as -files that are siblings of ancestor directories. The input format -matches the output of `git ls-tree --name-only`. This includes -interpreting pathnames that begin with a double quote (") as C-style -quoted strings. This may become the default in the future; --no-cone -can be passed to request non-cone mode. +input list is considered a list of directories. This allows for +better performance with a limited set of patterns (see 'CONE PATTERN +SET' below). The input format matches the output of `git ls-tree +--name-only`. This includes interpreting pathnames that begin with a +double quote (") as C-style quoted strings. 
Note that the set command +will write patterns to the sparse-checkout file to include all files +contained in those directories (recursively) as well as files that are +siblings of ancestor directories. This may become the default in the +future; --no-cone can be passed to request non-cone mode. ++ +When `--no-cone` is passed or `core.sparseCheckoutCone` is not enabled, +the input list is considered a list of patterns. This mode is harder +to use and less performant, and is thus not recommended. See the +"Sparse Checkout" section of linkgit:git-read-tree[1] and the "Pattern +Set" sections below for more details. + Use the `--[no-]sparse-index` option to use a sparse index (the default is to not use it). A sparse index reduces the size of the @@ -77,11 +93,10 @@ understand the sparse directory entries index extension and may fail to interact with your repository until it is disabled. 'add':: - Update the sparse-checkout file to include additional patterns. - By default, these patterns are read from the command-line arguments, - but they can be read from stdin using the `--stdin` option. When - `core.sparseCheckoutCone` is enabled, the given patterns are interpreted - as directory names as in the 'set' subcommand. + Update the sparse-checkout file to include additional directories + (in cone mode) or patterns (in non-cone mode). By default, these + directories or patterns are read from the command-line arguments, + but they can be read from stdin using the `--stdin` option. 'reapply':: Reapply the sparsity pattern rules to paths in the working tree. @@ -125,13 +140,14 @@ decreased in utility. SPARSE CHECKOUT --------------- -"Sparse checkout" allows populating the working directory sparsely. -It uses the skip-worktree bit (see linkgit:git-update-index[1]) to tell -Git whether a file in the working directory is worth looking at. If -the skip-worktree bit is set, then the file is ignored in the working -directory. Git will avoid populating the contents of those files, which -makes a sparse checkout helpful when working in a repository with many -files, but only a few are important to the current user. +"Sparse checkout" allows populating the working directory sparsely. It +uses the skip-worktree bit (see linkgit:git-update-index[1]) to tell Git +whether a file in the working directory is worth looking at. If the +skip-worktree bit is set, and the file is not present in the working tree, +then its absence is ignored. Git will avoid populating the contents of +those files, which makes a sparse checkout helpful when working in a +repository with many files, but only a few are important to the current +user. The `$GIT_DIR/info/sparse-checkout` file is used to define the skip-worktree reference bitmap. When Git updates the working diff --git a/Documentation/git-update-index.txt b/Documentation/git-update-index.txt index 2853f168d9..568dbfe76b 100644 --- a/Documentation/git-update-index.txt +++ b/Documentation/git-update-index.txt @@ -351,6 +351,10 @@ unchanged". Note that "assume unchanged" bit is *not* set if the index (use `git update-index --really-refresh` if you want to mark them as "assume unchanged"). +Sometimes users confuse the assume-unchanged bit with the +skip-worktree bit. See the final paragraph in the "Skip-worktree bit" +section below for an explanation of the differences. 
+ EXAMPLES -------- @@ -392,22 +396,47 @@ M foo.c SKIP-WORKTREE BIT ----------------- -Skip-worktree bit can be defined in one (long) sentence: When reading -an entry, if it is marked as skip-worktree, then Git pretends its -working directory version is up to date and read the index version -instead. - -To elaborate, "reading" means checking for file existence, reading -file attributes or file content. The working directory version may be -present or absent. If present, its content may match against the index -version or not. Writing is not affected by this bit, content safety -is still first priority. Note that Git _can_ update working directory -file, that is marked skip-worktree, if it is safe to do so (i.e. -working directory version matches index version) +Skip-worktree bit can be defined in one (long) sentence: Tell git to +avoid writing the file to the working directory when reasonably +possible, and treat the file as unchanged when it is not +present in the working directory. + +Note that not all git commands will pay attention to this bit, and +some only partially support it. + +The update-index flags and the read-tree capabilities relating to the +skip-worktree bit predated the introduction of the +linkgit:git-sparse-checkout[1] command, which provides a much easier +way to configure and handle the skip-worktree bits. If you want to +reduce your working tree to only deal with a subset of the files in +the repository, we strongly encourage the use of +linkgit:git-sparse-checkout[1] in preference to the low-level +update-index and read-tree primitives. + +The primary purpose of the skip-worktree bit is to enable sparse +checkouts, i.e. to have working directories with only a subset of +paths present. When the skip-worktree bit is set, Git commands (such +as `switch`, `pull`, `merge`) will avoid writing these files. +However, these commands will sometimes write these files anyway in +important cases such as conflicts during a merge or rebase. Git +commands will also avoid treating the lack of such files as an +intentional deletion; for example `git add -u` will not not stage a +deletion for these files and `git commit -a` will not make a commit +deleting them either. Although this bit looks similar to assume-unchanged bit, its goal is -different from assume-unchanged bit's. Skip-worktree also takes -precedence over assume-unchanged bit when both are set. +different. The assume-unchanged bit is for leaving the file in the +working tree but having Git omit checking it for changes and presuming +that the file has not been changed (though if it can determine without +stat'ing the file that it has changed, it is free to record the +changes). skip-worktree tells Git to ignore the absence of the file, +avoid updating it when possible with commands that normally update +much of the working directory (e.g. `checkout`, `switch`, `pull`, +etc.), and not have its absence be recorded in commits. Note that in +sparse checkouts (setup by `git sparse-checkout` or by configuring +core.sparseCheckout to true), if a file is marked as skip-worktree in +the index but is found in the working tree, Git will clear the +skip-worktree bit for that file. SPLIT INDEX ----------- diff --git a/Documentation/gitattributes.txt b/Documentation/gitattributes.txt index a71dad2674..4b36d51beb 100644 --- a/Documentation/gitattributes.txt +++ b/Documentation/gitattributes.txt @@ -829,6 +829,8 @@ patterns are available: - `java` suitable for source code in the Java language. 
+- `kotlin` suitable for source code in the Kotlin language. + - `markdown` suitable for Markdown documents. - `matlab` suitable for source code in the MATLAB and Octave languages. diff --git a/Documentation/technical/bundle-format.txt b/Documentation/technical/bundle-format.txt index bac558d049..b9be8644cf 100644 --- a/Documentation/technical/bundle-format.txt +++ b/Documentation/technical/bundle-format.txt @@ -71,6 +71,11 @@ and the Git bundle v2 format cannot represent a shallow clone repository. == Capabilities Because there is no opportunity for negotiation, unknown capabilities cause 'git -bundle' to abort. The only known capability is `object-format`, which specifies -the hash algorithm in use, and can take the same values as the -`extensions.objectFormat` configuration value. +bundle' to abort. + +* `object-format` specifies the hash algorithm in use, and can take the same + values as the `extensions.objectFormat` configuration value. + +* `filter` specifies an object filter as in the `--filter` option in + linkgit:git-rev-list[1]. The resulting pack-file must be marked as a + `.promisor` pack-file after it is unbundled. diff --git a/Documentation/technical/commit-graph-format.txt b/Documentation/technical/commit-graph-format.txt index 87971c27dd..484b185ba9 100644 --- a/Documentation/technical/commit-graph-format.txt +++ b/Documentation/technical/commit-graph-format.txt @@ -93,7 +93,7 @@ CHUNK DATA: 2 bits of the lowest byte, storing the 33rd and 34th bit of the commit time. - Generation Data (ID: {'G', 'D', 'A', 'T' }) (N * 4 bytes) [Optional] + Generation Data (ID: {'G', 'D', 'A', '2' }) (N * 4 bytes) [Optional] * This list of 4-byte values store corrected commit date offsets for the commits, arranged in the same order as commit data chunk. * If the corrected commit date offset cannot be stored within 31 bits, @@ -104,7 +104,7 @@ CHUNK DATA: by compatible versions of Git and in case of split commit-graph chains, the topmost layer also has Generation Data chunk. - Generation Data Overflow (ID: {'G', 'D', 'O', 'V' }) [Optional] + Generation Data Overflow (ID: {'G', 'D', 'O', '2' }) [Optional] * This list of 8-byte values stores the corrected commit date offsets for commits with corrected commit date offsets that cannot be stored within 31 bits. @@ -156,3 +156,11 @@ CHUNK DATA: TRAILER: H-byte HASH-checksum of all of the above. + +== Historical Notes: + +The Generation Data (GDA2) and Generation Data Overflow (GDO2) chunks have +the number '2' in their chunk IDs because a previous version of Git wrote +possibly erroneous data in these chunks with the IDs "GDAT" and "GDOV". By +changing the IDs, newer versions of Git will silently ignore those older +chunks and write the new information without trusting the incorrect data. diff --git a/Documentation/technical/reftable.txt b/Documentation/technical/reftable.txt index d7c3b645cf..6a67cc4174 100644 --- a/Documentation/technical/reftable.txt +++ b/Documentation/technical/reftable.txt @@ -443,7 +443,7 @@ Obj block format Object blocks are optional. Writers may choose to omit object blocks, especially if readers will not use the object name to ref mapping. -Object blocks use unique, abbreviated 2-32 object name keys, mapping to +Object blocks use unique, abbreviated 2-31 byte object name keys, mapping to ref blocks containing references pointing to that object directly, or as the peeled value of an annotated tag. Like ref blocks, object blocks use the file's standard block size. 
The abbreviation length is available in @@ -1,6 +1,9 @@ # The default target of this Makefile is... all:: +# Import tree-wide shared Makefile behavior and libraries +include shared.mak + # Define V=1 to have a more verbose compile. # # Define SHELL_PATH to a POSIX shell if your /bin/sh is broken. @@ -411,6 +414,8 @@ all:: # # Define HAVE_CLOCK_MONOTONIC if your platform has CLOCK_MONOTONIC. # +# Define HAVE_SYNC_FILE_RANGE if your platform has sync_file_range. +# # Define NEEDS_LIBRT if your platform requires linking with librt (glibc version # before 2.17) for clock_gettime and CLOCK_MONOTONIC. # @@ -830,12 +835,33 @@ GENERATED_H += hook-list.h .PHONY: generated-hdrs generated-hdrs: $(GENERATED_H) -LIB_H := $(sort $(patsubst ./%,%,$(shell git ls-files '*.h' ':!t/' ':!Documentation/' 2>/dev/null || \ +## Exhaustive lists of our source files, either dynamically generated, +## or hardcoded. +SOURCES_CMD = ( \ + git ls-files \ + '*.[hcS]' \ + '*.sh' \ + ':!*[tp][0-9][0-9][0-9][0-9]*' \ + ':!contrib' \ + 2>/dev/null || \ $(FIND) . \ - -name .git -prune -o \ - -name t -prune -o \ - -name Documentation -prune -o \ - -name '*.h' -print))) + \( -name .git -type d -prune \) \ + -o \( -name '[tp][0-9][0-9][0-9][0-9]*' -prune \) \ + -o \( -name contrib -type d -prune \) \ + -o \( -name build -type d -prune \) \ + -o \( -name 'trash*' -type d -prune \) \ + -o \( -name '*.[hcS]' -type f -print \) \ + -o \( -name '*.sh' -type f -print \) \ + | sed -e 's|^\./||' \ + ) +FOUND_SOURCE_FILES := $(shell $(SOURCES_CMD)) + +FOUND_C_SOURCES = $(filter %.c,$(FOUND_SOURCE_FILES)) +FOUND_H_SOURCES = $(filter %.h,$(FOUND_SOURCE_FILES)) + +COCCI_SOURCES = $(filter-out $(THIRD_PARTY_SOURCES),$(FOUND_C_SOURCES)) + +LIB_H = $(FOUND_H_SOURCES) LIB_OBJS += abspath.o LIB_OBJS += add-interactive.o @@ -989,6 +1015,7 @@ LIB_OBJS += rebase-interactive.o LIB_OBJS += rebase.o LIB_OBJS += ref-filter.o LIB_OBJS += reflog-walk.o +LIB_OBJS += reflog.o LIB_OBJS += refs.o LIB_OBJS += refs/debug.o LIB_OBJS += refs/files-backend.o @@ -1265,10 +1292,6 @@ endif ALL_CFLAGS = $(DEVELOPER_CFLAGS) $(CPPFLAGS) $(CFLAGS) ALL_LDFLAGS = $(LDFLAGS) -comma := , -empty := -space := $(empty) $(empty) - ifdef SANITIZE SANITIZERS := $(foreach flag,$(subst $(comma),$(space),$(SANITIZE)),$(flag)) BASIC_CFLAGS += -fsanitize=$(SANITIZE) -fno-sanitize-recover=$(SANITIZE) @@ -1897,6 +1920,10 @@ ifdef HAVE_CLOCK_MONOTONIC BASIC_CFLAGS += -DHAVE_CLOCK_MONOTONIC endif +ifdef HAVE_SYNC_FILE_RANGE + BASIC_CFLAGS += -DHAVE_SYNC_FILE_RANGE +endif + ifdef NEEDS_LIBRT EXTLIBS += -lrt endif @@ -1981,39 +2008,6 @@ ifndef PAGER_ENV PAGER_ENV = LESS=FRX LV=-c endif -QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir -QUIET_SUBDIR1 = - -ifneq ($(findstring w,$(MAKEFLAGS)),w) -PRINT_DIR = --no-print-directory -else # "make -w" -NO_SUBDIR = : -endif - -ifneq ($(findstring s,$(MAKEFLAGS)),s) -ifndef V - QUIET_CC = @echo ' ' CC $@; - QUIET_AR = @echo ' ' AR $@; - QUIET_LINK = @echo ' ' LINK $@; - QUIET_BUILT_IN = @echo ' ' BUILTIN $@; - QUIET_GEN = @echo ' ' GEN $@; - QUIET_LNCP = @echo ' ' LN/CP $@; - QUIET_XGETTEXT = @echo ' ' XGETTEXT $@; - QUIET_MSGFMT = @echo ' ' MSGFMT $@; - QUIET_GCOV = @echo ' ' GCOV $@; - QUIET_SP = @echo ' ' SP $<; - QUIET_HDR = @echo ' ' HDR $(<:hcc=h); - QUIET_RC = @echo ' ' RC $@; - QUIET_SPATCH = @echo ' ' SPATCH $<; - QUIET_SUBDIR0 = +@subdir= - QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \ - $(MAKE) $(PRINT_DIR) -C $$subdir - export V - export QUIET_GEN - export QUIET_BUILT_IN -endif -endif - ifdef NO_INSTALL_HARDLINKS 
export NO_INSTALL_HARDLINKS endif @@ -2194,16 +2188,6 @@ shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell strip: $(PROGRAMS) git$X $(STRIP) $(STRIP_OPTS) $^ -### Flags affecting all rules - -# A GNU make extension since gmake 3.72 (released in late 1994) to -# remove the target of rules if commands in those rules fail. The -# default is to only do that if make itself receives a signal. Affects -# all targets, see: -# -# info make --index-search=.DELETE_ON_ERROR -.DELETE_ON_ERROR: - ### Target-specific flags and dependencies # The generic compilation pattern rule and automatically @@ -2566,8 +2550,6 @@ ASM_SRC := $(wildcard $(OBJECTS:o=S)) ASM_OBJ := $(ASM_SRC:S=o) C_OBJ := $(filter-out $(ASM_OBJ),$(OBJECTS)) -.SUFFIXES: - $(C_OBJ): %.o: %.c GIT-CFLAGS $(missing_dep_dirs) $(missing_compdb_dir) $(QUIET_CC)$(CC) -o $*.o -c $(dep_args) $(compdb_args) $(ALL_CFLAGS) $(EXTRA_CPPFLAGS) $< $(ASM_OBJ): %.o: %.S GIT-CFLAGS $(missing_dep_dirs) $(missing_compdb_dir) @@ -2770,7 +2752,8 @@ all:: $(MOFILES) endif po/build/locale/%/LC_MESSAGES/git.mo: po/%.po - $(QUIET_MSGFMT)mkdir -p $(dir $@) && $(MSGFMT) -o $@ $< + $(call mkdir_p_parent_template) + $(QUIET_MSGFMT)$(MSGFMT) -o $@ $< LIB_PERL := $(wildcard perl/Git.pm perl/Git/*.pm perl/Git/*/*.pm perl/Git/*/*/*.pm) LIB_PERL_GEN := $(patsubst perl/%.pm,perl/build/lib/%.pm,$(LIB_PERL)) @@ -2786,35 +2769,16 @@ NO_PERL_CPAN_FALLBACKS_SQ = $(subst ','\'',$(NO_PERL_CPAN_FALLBACKS)) endif perl/build/lib/%.pm: perl/%.pm GIT-PERL-DEFINES - $(QUIET_GEN)mkdir -p $(dir $@) && \ + $(call mkdir_p_parent_template) + $(QUIET_GEN) \ sed -e 's|@@LOCALEDIR@@|$(perl_localedir_SQ)|g' \ -e 's|@@NO_GETTEXT@@|$(NO_GETTEXT_SQ)|g' \ -e 's|@@NO_PERL_CPAN_FALLBACKS@@|$(NO_PERL_CPAN_FALLBACKS_SQ)|g' \ < $< > $@ perl/build/man/man3/Git.3pm: perl/Git.pm - $(QUIET_GEN)mkdir -p $(dir $@) && \ - pod2man $< $@ - -FIND_SOURCE_FILES = ( \ - git ls-files \ - '*.[hcS]' \ - '*.sh' \ - ':!*[tp][0-9][0-9][0-9][0-9]*' \ - ':!contrib' \ - 2>/dev/null || \ - $(FIND) . 
\ - \( -name .git -type d -prune \) \ - -o \( -name '[tp][0-9][0-9][0-9][0-9]*' -prune \) \ - -o \( -name contrib -type d -prune \) \ - -o \( -name build -type d -prune \) \ - -o \( -name 'trash*' -type d -prune \) \ - -o \( -name '*.[hcS]' -type f -print \) \ - -o \( -name '*.sh' -type f -print \) \ - | sed -e 's|^\./||' \ - ) - -FOUND_SOURCE_FILES = $(shell $(FIND_SOURCE_FILES)) + $(call mkdir_p_parent_template) + $(QUIET_GEN)pod2man $< $@ $(ETAGS_TARGET): $(FOUND_SOURCE_FILES) $(QUIET_GEN)$(RM) $@+ && \ @@ -2948,7 +2912,7 @@ test_bindir_programs := $(patsubst %,bin-wrappers/%,$(BINDIR_PROGRAMS_NEED_X) $( all:: $(TEST_PROGRAMS) $(test_bindir_programs) bin-wrappers/%: wrap-for-bin.sh - @mkdir -p bin-wrappers + $(call mkdir_p_parent_template) $(QUIET_GEN)sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ -e 's|@@BUILD_DIR@@|$(shell pwd)|' \ -e 's|@@PROG@@|$(patsubst test-%,t/helper/test-%$(X),$(@F))$(patsubst git%,$(X),$(filter $(@F),$(BINDIR_PROGRAMS_NEED_X)))|' < $< > $@ && \ @@ -3025,9 +2989,6 @@ check: $(GENERATED_H) exit 1; \ fi -FOUND_C_SOURCES = $(filter %.c,$(FOUND_SOURCE_FILES)) -COCCI_SOURCES = $(filter-out $(THIRD_PARTY_SOURCES),$(FOUND_C_SOURCES)) - %.cocci.patch: %.cocci $(COCCI_SOURCES) $(QUIET_SPATCH) \ if test $(SPATCH_BATCH_SIZE) = 0; then \ diff --git a/add-interactive.c b/add-interactive.c index e1ab39cce3..7247210301 100644 --- a/add-interactive.c +++ b/add-interactive.c @@ -70,6 +70,8 @@ void init_add_i_state(struct add_i_state *s, struct repository *r) &s->interactive_diff_algorithm); git_config_get_bool("interactive.singlekey", &s->use_single_key); + if (s->use_single_key) + setbuf(stdin, NULL); } void clear_add_i_state(struct add_i_state *s) @@ -219,13 +219,18 @@ static void free_fragment_list(struct fragment *list) } } -static void free_patch(struct patch *patch) +void release_patch(struct patch *patch) { free_fragment_list(patch->fragments); free(patch->def_name); free(patch->old_name); free(patch->new_name); free(patch->result); +} + +static void free_patch(struct patch *patch) +{ + release_patch(patch); free(patch); } @@ -3159,7 +3164,7 @@ static int apply_binary(struct apply_state *state, * See if the old one matches what the patch * applies to. 
*/ - hash_object_file(the_hash_algo, img->buf, img->len, blob_type, + hash_object_file(the_hash_algo, img->buf, img->len, OBJ_BLOB, &oid); if (strcmp(oid_to_hex(&oid), patch->old_oid_prefix)) return error(_("the patch applies to '%s' (%s), " @@ -3205,7 +3210,7 @@ static int apply_binary(struct apply_state *state, name); /* verify that the result matches */ - hash_object_file(the_hash_algo, img->buf, img->len, blob_type, + hash_object_file(the_hash_algo, img->buf, img->len, OBJ_BLOB, &oid); if (strcmp(oid_to_hex(&oid), patch->new_oid_prefix)) return error(_("binary patch to '%s' creates incorrect result (expecting %s, got %s)"), @@ -3594,7 +3599,7 @@ static int try_threeway(struct apply_state *state, /* Preimage the patch was prepared for */ if (patch->is_new) - write_object_file("", 0, blob_type, &pre_oid); + write_object_file("", 0, OBJ_BLOB, &pre_oid); else if (get_oid(patch->old_oid_prefix, &pre_oid) || read_blob_object(&buf, &pre_oid, patch->old_mode)) return error(_("repository lacks the necessary blob to perform 3-way merge.")); @@ -3610,7 +3615,7 @@ static int try_threeway(struct apply_state *state, return -1; } /* post_oid is theirs */ - write_object_file(tmp_image.buf, tmp_image.len, blob_type, &post_oid); + write_object_file(tmp_image.buf, tmp_image.len, OBJ_BLOB, &post_oid); clear_image(&tmp_image); /* our_oid is ours */ @@ -3623,7 +3628,7 @@ static int try_threeway(struct apply_state *state, return error(_("cannot read the current contents of '%s'"), patch->old_name); } - write_object_file(tmp_image.buf, tmp_image.len, blob_type, &our_oid); + write_object_file(tmp_image.buf, tmp_image.len, OBJ_BLOB, &our_oid); clear_image(&tmp_image); /* in-core three-way merge between post and our using pre as base */ @@ -4323,7 +4328,7 @@ static int add_index_file(struct apply_state *state, } fill_stat_cache_info(state->repo->index, ce, &st); } - if (write_object_file(buf, size, blob_type, &ce->oid) < 0) { + if (write_object_file(buf, size, OBJ_BLOB, &ce->oid) < 0) { discard_cache_entry(ce); return error(_("unable to create backing store " "for newly created file %s"), path); @@ -173,6 +173,8 @@ int parse_git_diff_header(struct strbuf *root, unsigned int size, struct patch *patch); +void release_patch(struct patch *patch); + /* * Some aspects of the apply behavior are controlled by the following * bits in the "options" parameter passed to apply_all_patches(). @@ -14,7 +14,6 @@ #include "utf8.h" #include "quote.h" #include "thread-utils.h" -#include "dir.h" const char git_attr__true[] = "(builtin)true"; const char git_attr__false[] = "\0(builtin)false"; @@ -80,7 +79,7 @@ static int attr_hash_entry_cmp(const void *unused_cmp_data, * Access to this dictionary must be surrounded with a mutex. */ static struct attr_hashmap g_attr_hashmap = { - HASHMAP_INIT(attr_hash_entry_cmp, NULL) + .map = HASHMAP_INIT(attr_hash_entry_cmp, NULL), }; /* @@ -121,7 +121,6 @@ struct git_attr; /* opaque structures used internally for attribute collection */ struct all_attrs_item; struct attr_stack; -struct index_state; /* * Given a string, return the gitattribute object that @@ -21,13 +21,8 @@ #undef sprintf #undef vsprintf -#ifdef HAVE_VARIADIC_MACROS #define sprintf(...) BANNED(sprintf) #define vsprintf(...) 
BANNED(vsprintf) -#else -#define sprintf(buf,fmt,arg) BANNED(sprintf) -#define vsprintf(buf,fmt,arg) BANNED(vsprintf) -#endif #undef gmtime #define gmtime(t) BANNED(gmtime) diff --git a/block-sha1/sha1.c b/block-sha1/sha1.c index 1bb6e7c069..5974cd7dd3 100644 --- a/block-sha1/sha1.c +++ b/block-sha1/sha1.c @@ -11,27 +11,10 @@ #include "sha1.h" -#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) - -/* - * Force usage of rol or ror by selecting the one with the smaller constant. - * It _can_ generate slightly smaller code (a constant of 1 is special), but - * perhaps more importantly it's possibly faster on any uarch that does a - * rotate with a loop. - */ - -#define SHA_ASM(op, x, n) ({ unsigned int __res; __asm__(op " %1,%0":"=r" (__res):"i" (n), "0" (x)); __res; }) -#define SHA_ROL(x,n) SHA_ASM("rol", x, n) -#define SHA_ROR(x,n) SHA_ASM("ror", x, n) - -#else - #define SHA_ROT(X,l,r) (((X) << (l)) | ((X) >> (r))) #define SHA_ROL(X,n) SHA_ROT(X,n,32-(n)) #define SHA_ROR(X,n) SHA_ROT(X,32-(n),n) -#endif - /* * If you have 32 registers or more, the compiler can (and should) * try to change the array[] accesses into registers. However, on diff --git a/builtin/bundle.c b/builtin/bundle.c index 5a85d7cd0f..2adad545a2 100644 --- a/builtin/bundle.c +++ b/builtin/bundle.c @@ -93,6 +93,7 @@ static int cmd_bundle_create(int argc, const char **argv, const char *prefix) { if (!startup_info->have_repository) die(_("Need a repository to create a bundle.")); ret = !!create_bundle(the_repository, bundle_file, argc, argv, &pack_opts, version); + strvec_clear(&pack_opts); free(bundle_file); return ret; } diff --git a/builtin/cat-file.c b/builtin/cat-file.c index 7b3f42950e..50cf38999d 100644 --- a/builtin/cat-file.c +++ b/builtin/cat-file.c @@ -17,14 +17,20 @@ #include "object-store.h" #include "promisor-remote.h" +enum batch_mode { + BATCH_MODE_CONTENTS, + BATCH_MODE_INFO, + BATCH_MODE_QUEUE_AND_DISPATCH, +}; + struct batch_options { int enabled; int follow_symlinks; - int print_contents; + enum batch_mode batch_mode; int buffer_output; int all_objects; int unordered; - int cmdmode; /* may be 'w' or 'c' for --filters or --textconv */ + int transform_mode; /* may be 'w' or 'c' for --filters or --textconv */ const char *format; }; @@ -150,7 +156,10 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name, break; case 0: - if (type_from_string(exp_type) == OBJ_BLOB) { + { + enum object_type exp_type_id = type_from_string(exp_type); + + if (exp_type_id == OBJ_BLOB) { struct object_id blob_oid; if (oid_object_info(the_repository, &oid, NULL) == OBJ_TAG) { char *buffer = read_object_file(&oid, &type, @@ -172,10 +181,10 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name, * fall-back to the usual case. 
*/ } - buf = read_object_with_reference(the_repository, - &oid, exp_type, &size, NULL); + buf = read_object_with_reference(the_repository, &oid, + exp_type_id, &size, NULL); break; - + } default: die("git cat-file: unknown option: %s", exp_type); } @@ -302,19 +311,19 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d if (data->type == OBJ_BLOB) { if (opt->buffer_output) fflush(stdout); - if (opt->cmdmode) { + if (opt->transform_mode) { char *contents; unsigned long size; if (!data->rest) die("missing path for '%s'", oid_to_hex(oid)); - if (opt->cmdmode == 'w') { + if (opt->transform_mode == 'w') { if (filter_object(data->rest, 0100644, oid, &contents, &size)) die("could not convert '%s' %s", oid_to_hex(oid), data->rest); - } else if (opt->cmdmode == 'c') { + } else if (opt->transform_mode == 'c') { enum object_type type; if (!textconv_object(the_repository, data->rest, 0100644, oid, @@ -326,7 +335,7 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d die("could not convert '%s' %s", oid_to_hex(oid), data->rest); } else - BUG("invalid cmdmode: %c", opt->cmdmode); + BUG("invalid transform_mode: %c", opt->transform_mode); batch_write(opt, contents, size); free(contents); } else { @@ -351,6 +360,13 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d } } +static void print_default_format(struct strbuf *scratch, struct expand_data *data) +{ + strbuf_addf(scratch, "%s %s %"PRIuMAX"\n", oid_to_hex(&data->oid), + type_name(data->type), + (uintmax_t)data->size); +} + /* * If "pack" is non-NULL, then "offset" is the byte offset within the pack from * which the object may be accessed (though note that we may also rely on @@ -382,11 +398,17 @@ static void batch_object_write(const char *obj_name, } strbuf_reset(scratch); - strbuf_expand(scratch, opt->format, expand_format, data); - strbuf_addch(scratch, '\n'); + + if (!opt->format) { + print_default_format(scratch, data); + } else { + strbuf_expand(scratch, opt->format, expand_format, data); + strbuf_addch(scratch, '\n'); + } + batch_write(opt, scratch->buf, scratch->len); - if (opt->print_contents) { + if (opt->batch_mode == BATCH_MODE_CONTENTS) { print_object_or_die(opt, data); batch_write(opt, "\n", 1); } @@ -508,6 +530,137 @@ static int batch_unordered_packed(const struct object_id *oid, data); } +typedef void (*parse_cmd_fn_t)(struct batch_options *, const char *, + struct strbuf *, struct expand_data *); + +struct queued_cmd { + parse_cmd_fn_t fn; + char *line; +}; + +static void parse_cmd_contents(struct batch_options *opt, + const char *line, + struct strbuf *output, + struct expand_data *data) +{ + opt->batch_mode = BATCH_MODE_CONTENTS; + batch_one_object(line, output, opt, data); +} + +static void parse_cmd_info(struct batch_options *opt, + const char *line, + struct strbuf *output, + struct expand_data *data) +{ + opt->batch_mode = BATCH_MODE_INFO; + batch_one_object(line, output, opt, data); +} + +static void dispatch_calls(struct batch_options *opt, + struct strbuf *output, + struct expand_data *data, + struct queued_cmd *cmd, + int nr) +{ + int i; + + if (!opt->buffer_output) + die(_("flush is only for --buffer mode")); + + for (i = 0; i < nr; i++) + cmd[i].fn(opt, cmd[i].line, output, data); + + fflush(stdout); +} + +static void free_cmds(struct queued_cmd *cmd, size_t *nr) +{ + size_t i; + + for (i = 0; i < *nr; i++) + FREE_AND_NULL(cmd[i].line); + + *nr = 0; +} + + +static const struct parse_cmd { + const char *name; + parse_cmd_fn_t fn; + 
unsigned takes_args; +} commands[] = { + { "contents", parse_cmd_contents, 1}, + { "info", parse_cmd_info, 1}, + { "flush", NULL, 0}, +}; + +static void batch_objects_command(struct batch_options *opt, + struct strbuf *output, + struct expand_data *data) +{ + struct strbuf input = STRBUF_INIT; + struct queued_cmd *queued_cmd = NULL; + size_t alloc = 0, nr = 0; + + while (!strbuf_getline(&input, stdin)) { + int i; + const struct parse_cmd *cmd = NULL; + const char *p = NULL, *cmd_end; + struct queued_cmd call = {0}; + + if (!input.len) + die(_("empty command in input")); + if (isspace(*input.buf)) + die(_("whitespace before command: '%s'"), input.buf); + + for (i = 0; i < ARRAY_SIZE(commands); i++) { + if (!skip_prefix(input.buf, commands[i].name, &cmd_end)) + continue; + + cmd = &commands[i]; + if (cmd->takes_args) { + if (*cmd_end != ' ') + die(_("%s requires arguments"), + commands[i].name); + + p = cmd_end + 1; + } else if (*cmd_end) { + die(_("%s takes no arguments"), + commands[i].name); + } + + break; + } + + if (!cmd) + die(_("unknown command: '%s'"), input.buf); + + if (!strcmp(cmd->name, "flush")) { + dispatch_calls(opt, output, data, queued_cmd, nr); + free_cmds(queued_cmd, &nr); + } else if (!opt->buffer_output) { + cmd->fn(opt, p, output, data); + } else { + ALLOC_GROW(queued_cmd, nr + 1, alloc); + call.fn = cmd->fn; + call.line = xstrdup_or_null(p); + queued_cmd[nr++] = call; + } + } + + if (opt->buffer_output && + nr && + !git_env_bool("GIT_TEST_CAT_FILE_NO_FLUSH_ON_EXIT", 0)) { + dispatch_calls(opt, output, data, queued_cmd, nr); + free_cmds(queued_cmd, &nr); + } + + free(queued_cmd); + strbuf_release(&input); +} + +#define DEFAULT_FORMAT "%(objectname) %(objecttype) %(objectsize)" + static int batch_objects(struct batch_options *opt) { struct strbuf input = STRBUF_INIT; @@ -516,9 +669,6 @@ static int batch_objects(struct batch_options *opt) int save_warning; int retval = 0; - if (!opt->format) - opt->format = "%(objectname) %(objecttype) %(objectsize)"; - /* * Expand once with our special mark_query flag, which will prime the * object_info to be handed to oid_object_info_extended for each @@ -526,17 +676,22 @@ static int batch_objects(struct batch_options *opt) */ memset(&data, 0, sizeof(data)); data.mark_query = 1; - strbuf_expand(&output, opt->format, expand_format, &data); + strbuf_expand(&output, + opt->format ? opt->format : DEFAULT_FORMAT, + expand_format, + &data); data.mark_query = 0; strbuf_release(&output); - if (opt->cmdmode) + if (opt->transform_mode) data.split_on_whitespace = 1; + if (opt->format && !strcmp(opt->format, DEFAULT_FORMAT)) + opt->format = NULL; /* * If we are printing out the object, then always fill in the type, * since we will want to decide whether or not to stream. 
*/ - if (opt->print_contents) + if (opt->batch_mode == BATCH_MODE_CONTENTS) data.info.typep = &data.type; if (opt->all_objects) { @@ -590,6 +745,11 @@ static int batch_objects(struct batch_options *opt) save_warning = warn_on_object_refname_ambiguity; warn_on_object_refname_ambiguity = 0; + if (opt->batch_mode == BATCH_MODE_QUEUE_AND_DISPATCH) { + batch_objects_command(opt, &output, &data); + goto cleanup; + } + while (strbuf_getline(&input, stdin) != EOF) { if (data.split_on_whitespace) { /* @@ -608,6 +768,7 @@ static int batch_objects(struct batch_options *opt) batch_one_object(input.buf, &output, opt, &data); } + cleanup: strbuf_release(&input); strbuf_release(&output); warn_on_object_refname_ambiguity = save_warning; @@ -635,7 +796,16 @@ static int batch_option_callback(const struct option *opt, } bo->enabled = 1; - bo->print_contents = !strcmp(opt->long_name, "batch"); + + if (!strcmp(opt->long_name, "batch")) + bo->batch_mode = BATCH_MODE_CONTENTS; + else if (!strcmp(opt->long_name, "batch-check")) + bo->batch_mode = BATCH_MODE_INFO; + else if (!strcmp(opt->long_name, "batch-command")) + bo->batch_mode = BATCH_MODE_QUEUE_AND_DISPATCH; + else + BUG("%s given to batch-option-callback", opt->long_name); + bo->format = arg; return 0; @@ -654,7 +824,7 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix) N_("git cat-file <type> <object>"), N_("git cat-file (-e | -p) <object>"), N_("git cat-file (-t | -s) [--allow-unknown-type] <object>"), - N_("git cat-file (--batch | --batch-check) [--batch-all-objects]\n" + N_("git cat-file (--batch | --batch-check | --batch-command) [--batch-all-objects]\n" " [--buffer] [--follow-symlinks] [--unordered]\n" " [--textconv | --filters]"), N_("git cat-file (--textconv | --filters)\n" @@ -683,6 +853,10 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix) N_("like --batch, but don't emit <contents>"), PARSE_OPT_OPTARG | PARSE_OPT_NONEG, batch_option_callback), + OPT_CALLBACK_F(0, "batch-command", &batch, N_("format"), + N_("read commands from stdin"), + PARSE_OPT_OPTARG | PARSE_OPT_NONEG, + batch_option_callback), OPT_CMDMODE(0, "batch-all-objects", &opt, N_("with --batch[-check]: ignores stdin, batches all known objects"), 'b'), /* Batch-specific options */ @@ -742,7 +916,7 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix) /* Return early if we're in batch mode? */ if (batch.enabled) { if (opt_cw) - batch.cmdmode = opt; + batch.transform_mode = opt; else if (opt && opt != 'b') usage_msg_optf(_("'-%c' is incompatible with batch mode"), usage, options, opt); diff --git a/builtin/checkout.c b/builtin/checkout.c index 9244827ca0..797681481d 100644 --- a/builtin/checkout.c +++ b/builtin/checkout.c @@ -303,7 +303,7 @@ static int checkout_merged(int pos, const struct checkout *state, * (it also writes the merge result to the object database even * when it may contain conflicts). 
*/ - if (write_object_file(result_buf.ptr, result_buf.size, blob_type, &oid)) + if (write_object_file(result_buf.ptr, result_buf.size, OBJ_BLOB, &oid)) die(_("Unable to add merge result for '%s'"), path); free(result_buf.ptr); ce = make_transient_cache_entry(mode, &oid, path, 2, ce_mem_pool); @@ -738,6 +738,7 @@ static int merge_working_tree(const struct checkout_opts *opts, struct tree_desc trees[2]; struct tree *tree; struct unpack_trees_options topts; + const struct object_id *old_commit_oid; memset(&topts, 0, sizeof(topts)); topts.head_idx = -1; @@ -765,9 +766,15 @@ static int merge_working_tree(const struct checkout_opts *opts, &new_branch_info->commit->object.oid : &new_branch_info->oid, NULL); topts.preserve_ignored = !opts->overwrite_ignore; - tree = parse_tree_indirect(old_branch_info->commit ? - &old_branch_info->commit->object.oid : - the_hash_algo->empty_tree); + + old_commit_oid = old_branch_info->commit ? + &old_branch_info->commit->object.oid : + the_hash_algo->empty_tree; + tree = parse_tree_indirect(old_commit_oid); + if (!tree) + die(_("unable to parse commit %s"), + oid_to_hex(old_commit_oid)); + init_tree_desc(&trees[0], tree->buffer, tree->size); parse_tree(new_tree); tree = new_tree; diff --git a/builtin/clone.c b/builtin/clone.c index a572cda503..5231656379 100644 --- a/builtin/clone.c +++ b/builtin/clone.c @@ -33,6 +33,7 @@ #include "packfile.h" #include "list-objects-filter-options.h" #include "hook.h" +#include "bundle.h" /* * Overall FIXMEs: @@ -700,6 +701,8 @@ static int checkout(int submodule_progress, int filter_submodules) init_checkout_metadata(&opts.meta, head, &oid, NULL); tree = parse_tree_indirect(&oid); + if (!tree) + die(_("unable to parse commit %s"), oid_to_hex(&oid)); parse_tree(tree); init_tree_desc(&t, tree->buffer, tree->size); if (unpack_trees(1, &t, &opts) < 0) @@ -1170,6 +1173,18 @@ int cmd_clone(int argc, const char **argv, const char *prefix) warning(_("--local is ignored")); transport->cloning = 1; + if (is_bundle) { + struct bundle_header header = BUNDLE_HEADER_INIT; + int fd = read_bundle_header(path, &header); + int has_filter = header.filter.choice != LOFC_DISABLED; + + if (fd > 0) + close(fd); + bundle_header_release(&header); + if (has_filter) + die(_("cannot clone from filtered bundle")); + } + transport_set_option(transport, TRANS_OPT_KEEP, "yes"); if (reject_shallow) diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c index 4247fbde95..51c4040ea6 100644 --- a/builtin/commit-graph.c +++ b/builtin/commit-graph.c @@ -192,7 +192,7 @@ static int git_commit_graph_write_config(const char *var, const char *value, static int graph_write(int argc, const char **argv) { - struct string_list pack_indexes = STRING_LIST_INIT_NODUP; + struct string_list pack_indexes = STRING_LIST_INIT_DUP; struct strbuf buf = STRBUF_INIT; struct oidset commits = OIDSET_INIT; struct object_directory *odb = NULL; @@ -273,8 +273,8 @@ static int graph_write(int argc, const char **argv) if (opts.stdin_packs) { while (strbuf_getline(&buf, stdin) != EOF) - string_list_append(&pack_indexes, - strbuf_detach(&buf, NULL)); + string_list_append_nodup(&pack_indexes, + strbuf_detach(&buf, NULL)); } else if (opts.stdin_commits) { oidset_init(&commits, 0); if (opts.progress) diff --git a/builtin/config.c b/builtin/config.c index 542d8d02b2..e7b88a9c08 100644 --- a/builtin/config.c +++ b/builtin/config.c @@ -151,7 +151,7 @@ static struct option builtin_config_options[] = { OPT_BIT(0, "get-color", &actions, N_("find the color configured: slot [default]"), 
ACTION_GET_COLOR), OPT_BIT(0, "get-colorbool", &actions, N_("find the color setting: slot [stdout-is-tty]"), ACTION_GET_COLORBOOL), OPT_GROUP(N_("Type")), - OPT_CALLBACK('t', "type", &type, "", N_("value is given this type"), option_parse_type), + OPT_CALLBACK('t', "type", &type, N_("type"), N_("value is given this type"), option_parse_type), OPT_CALLBACK_VALUE(0, "bool", &type, N_("value is \"true\" or \"false\""), TYPE_BOOL), OPT_CALLBACK_VALUE(0, "int", &type, N_("value is decimal number"), TYPE_INT), OPT_CALLBACK_VALUE(0, "bool-or-int", &type, N_("value is --bool or --int"), TYPE_BOOL_OR_INT), @@ -612,7 +612,7 @@ static int get_urlmatch(const char *var, const char *url) strbuf_release(&matched->value); } - string_list_clear(&config.vars, 1); + urlmatch_config_release(&config); string_list_clear(&values, 1); free(config.url.url); diff --git a/builtin/fast-export.c b/builtin/fast-export.c index 510139e9b5..a7d72697fb 100644 --- a/builtin/fast-export.c +++ b/builtin/fast-export.c @@ -300,7 +300,7 @@ static void export_blob(const struct object_id *oid) if (!buf) die("could not read blob %s", oid_to_hex(oid)); if (check_object_signature(the_repository, oid, buf, size, - type_name(type), NULL) < 0) + type) < 0) die("oid mismatch in blob %s", oid_to_hex(oid)); object = parse_object_buffer(the_repository, oid, type, size, buf, &eaten); diff --git a/builtin/fast-import.c b/builtin/fast-import.c index b7105fcad9..28d3193c38 100644 --- a/builtin/fast-import.c +++ b/builtin/fast-import.c @@ -865,7 +865,7 @@ static void end_packfile(void) struct tag *t; close_pack_windows(pack_data); - finalize_hashfile(pack_file, cur_pack_oid.hash, 0); + finalize_hashfile(pack_file, cur_pack_oid.hash, FSYNC_COMPONENT_PACK, 0); fixup_pack_header_footer(pack_data->pack_fd, pack_data->hash, pack_data->pack_name, object_count, cur_pack_oid.hash, pack_size); @@ -944,8 +944,8 @@ static int store_object( git_hash_ctx c; git_zstream s; - hdrlen = xsnprintf((char *)hdr, sizeof(hdr), "%s %lu", - type_name(type), (unsigned long)dat->len) + 1; + hdrlen = format_object_header((char *)hdr, sizeof(hdr), type, + dat->len); the_hash_algo->init_fn(&c); the_hash_algo->update_fn(&c, hdr, hdrlen); the_hash_algo->update_fn(&c, dat->buf, dat->len); @@ -1098,7 +1098,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark) hashfile_checkpoint(pack_file, &checkpoint); offset = checkpoint.offset; - hdrlen = xsnprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1; + hdrlen = format_object_header((char *)out_buf, out_sz, OBJ_BLOB, len); the_hash_algo->init_fn(&c); the_hash_algo->update_fn(&c, out_buf, hdrlen); @@ -2490,7 +2490,7 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa unsigned long size; char *buf = read_object_with_reference(the_repository, &commit_oid, - commit_type, &size, + OBJ_COMMIT, &size, &commit_oid); if (!buf || size < the_hash_algo->hexsz + 6) die("Not a valid commit: %s", p); @@ -2562,7 +2562,7 @@ static void parse_from_existing(struct branch *b) char *buf; buf = read_object_with_reference(the_repository, - &b->oid, commit_type, &size, + &b->oid, OBJ_COMMIT, &size, &b->oid); parse_from_commit(b, buf, size); free(buf); @@ -2658,7 +2658,7 @@ static struct hash_list *parse_merge(unsigned int *count) unsigned long size; char *buf = read_object_with_reference(the_repository, &n->oid, - commit_type, + OBJ_COMMIT, &size, &n->oid); if (!buf || size < the_hash_algo->hexsz + 6) die("Not a valid commit: %s", from); diff --git a/builtin/fetch.c b/builtin/fetch.c 
index 95832ba1df..9b4018f62c 100644 --- a/builtin/fetch.c +++ b/builtin/fetch.c @@ -349,7 +349,19 @@ static void clear_item(struct refname_hash_entry *item) item->ignore = 1; } + +static void add_already_queued_tags(const char *refname, + const struct object_id *old_oid, + const struct object_id *new_oid, + void *cb_data) +{ + struct hashmap *queued_tags = cb_data; + if (starts_with(refname, "refs/tags/") && new_oid) + (void) refname_hash_add(queued_tags, refname, new_oid); +} + static void find_non_local_tags(const struct ref *refs, + struct ref_transaction *transaction, struct ref **head, struct ref ***tail) { @@ -367,6 +379,16 @@ static void find_non_local_tags(const struct ref *refs, create_fetch_oidset(head, &fetch_oids); for_each_ref(add_one_refname, &existing_refs); + + /* + * If we already have a transaction, then we need to filter out all + * tags which have already been queued up. + */ + if (transaction) + ref_transaction_for_each_queued_update(transaction, + add_already_queued_tags, + &existing_refs); + for (ref = refs; ref; ref = ref->next) { if (!starts_with(ref->name, "refs/tags/")) continue; @@ -600,7 +622,7 @@ static struct ref *get_ref_map(struct remote *remote, /* also fetch all tags */ get_fetch_map(remote_refs, tag_refspec, &tail, 0); else if (tags == TAGS_DEFAULT && *autotags) - find_non_local_tags(remote_refs, &ref_map, &tail); + find_non_local_tags(remote_refs, NULL, &ref_map, &tail); /* Now append any refs to be updated opportunistically: */ *tail = orefs; @@ -1083,23 +1105,18 @@ N_("it took %.2f seconds to check forced updates; you can use\n" "to avoid this check\n"); static int store_updated_refs(const char *raw_url, const char *remote_name, - int connectivity_checked, struct ref *ref_map, - struct worktree **worktrees) + int connectivity_checked, + struct ref_transaction *transaction, struct ref *ref_map, + struct fetch_head *fetch_head, struct worktree **worktrees) { - struct fetch_head fetch_head; int url_len, i, rc = 0; struct strbuf note = STRBUF_INIT, err = STRBUF_INIT; - struct ref_transaction *transaction = NULL; const char *what, *kind; struct ref *rm; char *url; int want_status; int summary_width = 0; - rc = open_fetch_head(&fetch_head); - if (rc) - return -1; - if (verbosity >= 0) summary_width = transport_summary_width(ref_map); @@ -1118,14 +1135,6 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, } } - if (atomic_fetch) { - transaction = ref_transaction_begin(&err); - if (!transaction) { - error("%s", err.buf); - goto abort; - } - } - prepare_format_display(ref_map); /* @@ -1137,7 +1146,6 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, want_status <= FETCH_HEAD_IGNORE; want_status++) { for (rm = ref_map; rm; rm = rm->next) { - struct commit *commit = NULL; struct ref *ref = NULL; if (rm->status == REF_STATUS_REJECT_SHALLOW) { @@ -1148,21 +1156,34 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, } /* - * References in "refs/tags/" are often going to point - * to annotated tags, which are not part of the - * commit-graph. We thus only try to look up refs in - * the graph which are not in that namespace to not - * regress performance in repositories with many - * annotated tags. + * When writing FETCH_HEAD we need to determine whether + * we already have the commit or not. If not, then the + * reference is not for merge and needs to be written + * to the reflog after other commits which we already + * have. 
We're not interested in this property though + * in case FETCH_HEAD is not to be updated, so we can + * skip the classification in that case. */ - if (!starts_with(rm->name, "refs/tags/")) - commit = lookup_commit_in_graph(the_repository, &rm->old_oid); - if (!commit) { - commit = lookup_commit_reference_gently(the_repository, - &rm->old_oid, - 1); - if (!commit) - rm->fetch_head_status = FETCH_HEAD_NOT_FOR_MERGE; + if (fetch_head->fp) { + struct commit *commit = NULL; + + /* + * References in "refs/tags/" are often going to point + * to annotated tags, which are not part of the + * commit-graph. We thus only try to look up refs in + * the graph which are not in that namespace to not + * regress performance in repositories with many + * annotated tags. + */ + if (!starts_with(rm->name, "refs/tags/")) + commit = lookup_commit_in_graph(the_repository, &rm->old_oid); + if (!commit) { + commit = lookup_commit_reference_gently(the_repository, + &rm->old_oid, + 1); + if (!commit) + rm->fetch_head_status = FETCH_HEAD_NOT_FOR_MERGE; + } } if (rm->fetch_head_status != want_status) @@ -1209,7 +1230,7 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, strbuf_addf(¬e, "'%s' of ", what); } - append_fetch_head(&fetch_head, &rm->old_oid, + append_fetch_head(fetch_head, &rm->old_oid, rm->fetch_head_status, note.buf, url, url_len); @@ -1241,17 +1262,6 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, } } - if (!rc && transaction) { - rc = ref_transaction_commit(transaction, &err); - if (rc) { - error("%s", err.buf); - goto abort; - } - } - - if (!rc) - commit_fetch_head(&fetch_head); - if (rc & STORE_REF_ERROR_DF_CONFLICT) error(_("some local refs could not be updated; try running\n" " 'git remote prune %s' to remove any old, conflicting " @@ -1269,9 +1279,7 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, abort: strbuf_release(¬e); strbuf_release(&err); - ref_transaction_free(transaction); free(url); - close_fetch_head(&fetch_head); return rc; } @@ -1311,7 +1319,9 @@ static int check_exist_and_connected(struct ref *ref_map) } static int fetch_and_consume_refs(struct transport *transport, + struct ref_transaction *transaction, struct ref *ref_map, + struct fetch_head *fetch_head, struct worktree **worktrees) { int connectivity_checked = 1; @@ -1334,7 +1344,8 @@ static int fetch_and_consume_refs(struct transport *transport, trace2_region_enter("fetch", "consume_refs", the_repository); ret = store_updated_refs(transport->url, transport->remote->name, - connectivity_checked, ref_map, worktrees); + connectivity_checked, transaction, ref_map, + fetch_head, worktrees); trace2_region_leave("fetch", "consume_refs", the_repository); out: @@ -1342,11 +1353,14 @@ out: return ret; } -static int prune_refs(struct refspec *rs, struct ref *ref_map, +static int prune_refs(struct refspec *rs, + struct ref_transaction *transaction, + struct ref *ref_map, const char *raw_url) { int url_len, i, result = 0; struct ref *ref, *stale_refs = get_stale_heads(rs, ref_map); + struct strbuf err = STRBUF_INIT; char *url; const char *dangling_msg = dry_run ? 
_(" (%s will become dangling)") @@ -1366,13 +1380,22 @@ static int prune_refs(struct refspec *rs, struct ref *ref_map, url_len = i - 3; if (!dry_run) { - struct string_list refnames = STRING_LIST_INIT_NODUP; + if (transaction) { + for (ref = stale_refs; ref; ref = ref->next) { + result = ref_transaction_delete(transaction, ref->name, NULL, 0, + "fetch: prune", &err); + if (result) + goto cleanup; + } + } else { + struct string_list refnames = STRING_LIST_INIT_NODUP; - for (ref = stale_refs; ref; ref = ref->next) - string_list_append(&refnames, ref->name); + for (ref = stale_refs; ref; ref = ref->next) + string_list_append(&refnames, ref->name); - result = delete_refs("fetch: prune", &refnames, 0); - string_list_clear(&refnames, 0); + result = delete_refs("fetch: prune", &refnames, 0); + string_list_clear(&refnames, 0); + } } if (verbosity >= 0) { @@ -1393,6 +1416,8 @@ static int prune_refs(struct refspec *rs, struct ref *ref_map, } } +cleanup: + strbuf_release(&err); free(url); free_refs(stale_refs); return result; @@ -1507,10 +1532,13 @@ static struct transport *prepare_transport(struct remote *remote, int deepen) return transport; } -static void backfill_tags(struct transport *transport, struct ref *ref_map, - struct worktree **worktrees) +static int backfill_tags(struct transport *transport, + struct ref_transaction *transaction, + struct ref *ref_map, + struct fetch_head *fetch_head, + struct worktree **worktrees) { - int cannot_reuse; + int retcode, cannot_reuse; /* * Once we have set TRANS_OPT_DEEPEN_SINCE, we can't unset it @@ -1529,18 +1557,21 @@ static void backfill_tags(struct transport *transport, struct ref *ref_map, transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, NULL); transport_set_option(transport, TRANS_OPT_DEPTH, "0"); transport_set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, NULL); - fetch_and_consume_refs(transport, ref_map, worktrees); + retcode = fetch_and_consume_refs(transport, transaction, ref_map, fetch_head, worktrees); if (gsecondary) { transport_disconnect(gsecondary); gsecondary = NULL; } + + return retcode; } static int do_fetch(struct transport *transport, struct refspec *rs) { - struct ref *ref_map; + struct ref_transaction *transaction = NULL; + struct ref *ref_map = NULL; int autotags = (transport->remote->fetch_tags == 1); int retcode = 0; const struct ref *remote_refs; @@ -1548,6 +1579,8 @@ static int do_fetch(struct transport *transport, TRANSPORT_LS_REFS_OPTIONS_INIT; int must_list_refs = 1; struct worktree **worktrees = get_worktrees(); + struct fetch_head fetch_head = { 0 }; + struct strbuf err = STRBUF_INIT; if (tags == TAGS_DEFAULT) { if (transport->remote->fetch_tags == 2) @@ -1605,6 +1638,18 @@ static int do_fetch(struct transport *transport, if (!update_head_ok) check_not_current_branch(ref_map, worktrees); + retcode = open_fetch_head(&fetch_head); + if (retcode) + goto cleanup; + + if (atomic_fetch) { + transaction = ref_transaction_begin(&err); + if (!transaction) { + retcode = error("%s", err.buf); + goto cleanup; + } + } + if (tags == TAGS_DEFAULT && autotags) transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, "1"); if (prune) { @@ -1614,21 +1659,61 @@ static int do_fetch(struct transport *transport, * don't care whether --tags was specified. 
*/ if (rs->nr) { - retcode = prune_refs(rs, ref_map, transport->url); + retcode = prune_refs(rs, transaction, ref_map, transport->url); } else { retcode = prune_refs(&transport->remote->fetch, - ref_map, + transaction, ref_map, transport->url); } if (retcode != 0) retcode = 1; } - if (fetch_and_consume_refs(transport, ref_map, worktrees)) { - free_refs(ref_map); + + if (fetch_and_consume_refs(transport, transaction, ref_map, &fetch_head, worktrees)) { retcode = 1; goto cleanup; } + /* + * If neither --no-tags nor --tags was specified, do automated tag + * following. + */ + if (tags == TAGS_DEFAULT && autotags) { + struct ref *tags_ref_map = NULL, **tail = &tags_ref_map; + + find_non_local_tags(remote_refs, transaction, &tags_ref_map, &tail); + if (tags_ref_map) { + /* + * If backfilling of tags fails then we want to tell + * the user so, but we have to continue regardless to + * populate upstream information of the references we + * have already fetched above. The exception though is + * when `--atomic` is passed: in that case we'll abort + * the transaction and don't commit anything. + */ + if (backfill_tags(transport, transaction, tags_ref_map, + &fetch_head, worktrees)) + retcode = 1; + } + + free_refs(tags_ref_map); + } + + if (transaction) { + if (retcode) + goto cleanup; + + retcode = ref_transaction_commit(transaction, &err); + if (retcode) { + error("%s", err.buf); + ref_transaction_free(transaction); + transaction = NULL; + goto cleanup; + } + } + + commit_fetch_head(&fetch_head); + if (set_upstream) { struct branch *branch = branch_get("HEAD"); struct ref *rm; @@ -1648,7 +1733,7 @@ static int do_fetch(struct transport *transport, if (!rm->peer_ref) { if (source_ref) { warning(_("multiple branches detected, incompatible with --set-upstream")); - goto skip; + goto cleanup; } else { source_ref = rm; } @@ -1662,7 +1747,7 @@ static int do_fetch(struct transport *transport, warning(_("could not set upstream of HEAD to '%s' from '%s' when " "it does not point to any branch."), shortname, transport->remote->name); - goto skip; + goto cleanup; } if (!strcmp(source_ref->name, "HEAD") || @@ -1682,21 +1767,16 @@ static int do_fetch(struct transport *transport, "you need to specify exactly one branch with the --set-upstream option")); } } -skip: - free_refs(ref_map); - /* if neither --no-tags nor --tags was specified, do automated tag - * following ... 
*/ - if (tags == TAGS_DEFAULT && autotags) { - struct ref **tail = &ref_map; - ref_map = NULL; - find_non_local_tags(remote_refs, &ref_map, &tail); - if (ref_map) - backfill_tags(transport, ref_map, worktrees); - free_refs(ref_map); +cleanup: + if (retcode && transaction) { + ref_transaction_abort(transaction, &err); + error("%s", err.buf); } -cleanup: + close_fetch_head(&fetch_head); + strbuf_release(&err); + free_refs(ref_map); free_worktrees(worktrees); return retcode; } @@ -2178,13 +2258,13 @@ int cmd_fetch(int argc, const char **argv, const char *prefix) max_children = fetch_parallel_config; add_options_to_argv(&options); - result = fetch_populated_submodules(the_repository, - &options, - submodule_prefix, - recurse_submodules, - recurse_submodules_default, - verbosity < 0, - max_children); + result = fetch_submodules(the_repository, + &options, + submodule_prefix, + recurse_submodules, + recurse_submodules_default, + verbosity < 0, + max_children); strvec_clear(&options); } diff --git a/builtin/gc.c b/builtin/gc.c index ffaf0daf5d..b335cffa33 100644 --- a/builtin/gc.c +++ b/builtin/gc.c @@ -30,7 +30,6 @@ #include "promisor-remote.h" #include "refs.h" #include "remote.h" -#include "object-store.h" #include "exec-cmd.h" #include "hook.h" diff --git a/builtin/grep.c b/builtin/grep.c index f1a924eade..bcb07ea7f7 100644 --- a/builtin/grep.c +++ b/builtin/grep.c @@ -484,7 +484,7 @@ static int grep_submodule(struct grep_opt *opt, object_type = oid_object_info(subrepo, oid, NULL); obj_read_unlock(); data = read_object_with_reference(subrepo, - oid, tree_type, + oid, OBJ_TREE, &size, NULL); if (!data) die(_("unable to read tree (%s)"), oid_to_hex(oid)); @@ -653,7 +653,7 @@ static int grep_object(struct grep_opt *opt, const struct pathspec *pathspec, int hit, len; data = read_object_with_reference(opt->repo, - &obj->oid, tree_type, + &obj->oid, OBJ_TREE, &size, NULL); if (!data) die(_("unable to read tree (%s)"), oid_to_hex(&obj->oid)); diff --git a/builtin/hash-object.c b/builtin/hash-object.c index 0837849288..fbae878c2b 100644 --- a/builtin/hash-object.c +++ b/builtin/hash-object.c @@ -25,7 +25,7 @@ static int hash_literally(struct object_id *oid, int fd, const char *type, unsig if (strbuf_read(&buf, fd, 4096) < 0) ret = -1; else - ret = hash_object_file_literally(buf.buf, buf.len, type, oid, + ret = write_object_file_literally(buf.buf, buf.len, type, oid, flags); strbuf_release(&buf); return ret; diff --git a/builtin/help.c b/builtin/help.c index b4f2ad3f94..222f994f86 100644 --- a/builtin/help.c +++ b/builtin/help.c @@ -51,9 +51,14 @@ static const char *html_path; static int verbose = 1; static enum help_format help_format = HELP_FORMAT_NONE; static int exclude_guides; +static int show_external_commands = -1; +static int show_aliases = -1; static struct option builtin_help_options[] = { OPT_CMDMODE('a', "all", &cmd_mode, N_("print all available commands"), HELP_ACTION_ALL), + OPT_BOOL(0, "external-commands", &show_external_commands, + N_("show external commands in --all")), + OPT_BOOL(0, "aliases", &show_aliases, N_("show aliases in --all")), OPT_HIDDEN_BOOL(0, "exclude-guides", &exclude_guides, N_("exclude guides")), OPT_SET_INT('m', "man", &help_format, N_("show man page"), HELP_FORMAT_MAN), OPT_SET_INT('w', "web", &help_format, N_("show manual in web browser"), @@ -75,8 +80,8 @@ static struct option builtin_help_options[] = { }; static const char * const builtin_help_usage[] = { - N_("git help [-a|--all] [--[no-]verbose]]\n" - " [[-i|--info] [-m|--man] [-w|--web]] [<command>]"), + "git 
help [-a|--all] [--[no-]verbose]] [--[no-]external-commands] [--[no-]aliases]", + N_("git help [[-i|--info] [-m|--man] [-w|--web]] [<command>]"), "git help [-g|--guides]", "git help [-c|--config]", NULL @@ -574,11 +579,40 @@ static const char *check_git_cmd(const char* cmd) return cmd; } -static void no_extra_argc(int argc) +static void no_help_format(const char *opt_mode, enum help_format fmt) +{ + const char *opt_fmt; + + switch (fmt) { + case HELP_FORMAT_NONE: + return; + case HELP_FORMAT_MAN: + opt_fmt = "--man"; + break; + case HELP_FORMAT_INFO: + opt_fmt = "--info"; + break; + case HELP_FORMAT_WEB: + opt_fmt = "--web"; + break; + default: + BUG("unreachable"); + } + + usage_msg_optf(_("options '%s' and '%s' cannot be used together"), + builtin_help_usage, builtin_help_options, opt_mode, + opt_fmt); +} + +static void opt_mode_usage(int argc, const char *opt_mode, + enum help_format fmt) { if (argc) - usage_msg_opt(_("this option doesn't take any other arguments"), - builtin_help_usage, builtin_help_options); + usage_msg_optf(_("the '%s' option doesn't take any non-option arguments"), + builtin_help_usage, builtin_help_options, + opt_mode); + + no_help_format(opt_mode, fmt); } int cmd_help(int argc, const char **argv, const char *prefix) @@ -591,11 +625,19 @@ int cmd_help(int argc, const char **argv, const char *prefix) builtin_help_usage, 0); parsed_help_format = help_format; + if (cmd_mode != HELP_ACTION_ALL && + (show_external_commands >= 0 || + show_aliases >= 0)) + usage_msg_opt(_("the '--no-[external-commands|aliases]' options can only be used with '--all'"), + builtin_help_usage, builtin_help_options); + switch (cmd_mode) { case HELP_ACTION_ALL: + opt_mode_usage(argc, "--all", help_format); if (verbose) { setup_pager(); - list_all_cmds_help(); + list_all_cmds_help(show_external_commands, + show_aliases); return 0; } printf(_("usage: %s%s"), _(git_usage_string), "\n\n"); @@ -604,20 +646,21 @@ int cmd_help(int argc, const char **argv, const char *prefix) printf("%s\n", _(git_more_info_string)); break; case HELP_ACTION_GUIDES: - no_extra_argc(argc); + opt_mode_usage(argc, "--guides", help_format); list_guides_help(); printf("%s\n", _(git_more_info_string)); return 0; case HELP_ACTION_CONFIG_FOR_COMPLETION: - no_extra_argc(argc); + opt_mode_usage(argc, "--config-for-completion", help_format); list_config_help(SHOW_CONFIG_VARS); return 0; case HELP_ACTION_CONFIG_SECTIONS_FOR_COMPLETION: - no_extra_argc(argc); + opt_mode_usage(argc, "--config-sections-for-completion", + help_format); list_config_help(SHOW_CONFIG_SECTIONS); return 0; case HELP_ACTION_CONFIG: - no_extra_argc(argc); + opt_mode_usage(argc, "--config", help_format); setup_pager(); list_config_help(SHOW_CONFIG_HUMAN); printf("\n%s\n", _("'git help config' for more information")); diff --git a/builtin/index-pack.c b/builtin/index-pack.c index c45273de3b..680b66b063 100644 --- a/builtin/index-pack.c +++ b/builtin/index-pack.c @@ -453,8 +453,7 @@ static void *unpack_entry_data(off_t offset, unsigned long size, int hdrlen; if (!is_delta_type(type)) { - hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX, - type_name(type),(uintmax_t)size) + 1; + hdrlen = format_object_header(hdr, sizeof(hdr), type, size); the_hash_algo->init_fn(&c); the_hash_algo->update_fn(&c, hdr, hdrlen); } else @@ -583,7 +582,7 @@ static void *unpack_data(struct object_entry *obj, if (!n) die(Q_("premature end of pack file, %"PRIuMAX" byte missing", "premature end of pack file, %"PRIuMAX" bytes missing", - (unsigned int)len), + len), (uintmax_t)len); from += 
n; len -= n; @@ -975,7 +974,7 @@ static struct base_data *resolve_delta(struct object_entry *delta_obj, if (!result_data) bad_object(delta_obj->idx.offset, _("failed to apply delta")); hash_object_file(the_hash_algo, result_data, result_size, - type_name(delta_obj->real_type), &delta_obj->idx.oid); + delta_obj->real_type, &delta_obj->idx.oid); sha1_object(result_data, NULL, result_size, delta_obj->real_type, &delta_obj->idx.oid); @@ -1113,6 +1112,7 @@ static void *threaded_second_pass(void *data) list_add(&child->list, &work_head); base_cache_used += child->size; prune_base_data(NULL); + free_base_data(child); } else { /* * This child does not have its own children. It may be @@ -1135,6 +1135,7 @@ static void *threaded_second_pass(void *data) p = next_p; } + FREE_AND_NULL(child); } work_unlock(); } @@ -1290,7 +1291,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha nr_objects - nr_objects_initial); stop_progress_msg(&progress, msg.buf); strbuf_release(&msg); - finalize_hashfile(f, tail_hash, 0); + finalize_hashfile(f, tail_hash, FSYNC_COMPONENT_PACK, 0); hashcpy(read_hash, pack_hash); fixup_pack_header_footer(output_fd, pack_hash, curr_pack, nr_objects, @@ -1417,9 +1418,8 @@ static void fix_unresolved_deltas(struct hashfile *f) if (!data) continue; - if (check_object_signature(the_repository, &d->oid, - data, size, - type_name(type), NULL)) + if (check_object_signature(the_repository, &d->oid, data, size, + type) < 0) die(_("local object %s is corrupt"), oid_to_hex(&d->oid)); /* @@ -1428,6 +1428,7 @@ static void fix_unresolved_deltas(struct hashfile *f) * object). */ append_obj_to_pack(f, d->oid.hash, data, size, type); + free(data); threaded_second_pass(NULL); display_progress(progress, nr_resolved_deltas); @@ -1512,7 +1513,7 @@ static void final(const char *final_pack_name, const char *curr_pack_name, if (!from_stdin) { close(input_fd); } else { - fsync_or_die(output_fd, curr_pack_name); + fsync_component_or_die(FSYNC_COMPONENT_PACK, output_fd, curr_pack_name); err = close(output_fd); if (err) die_errno(_("error while closing pack file")); @@ -1707,6 +1708,7 @@ static void show_pack_info(int stat_only) i + 1, chain_histogram[i]); } + free(chain_histogram); } int cmd_index_pack(int argc, const char **argv, const char *prefix) @@ -1936,6 +1938,7 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix) if (do_fsck_object && fsck_finish(&fsck_options)) die(_("fsck error in pack objects")); + free(opts.anomaly); free(objects); strbuf_release(&index_name_buf); strbuf_release(&rev_index_name_buf); diff --git a/builtin/merge-base.c b/builtin/merge-base.c index 26b84980db..a11f8c6e4b 100644 --- a/builtin/merge-base.c +++ b/builtin/merge-base.c @@ -138,6 +138,7 @@ int cmd_merge_base(int argc, const char **argv, const char *prefix) int rev_nr = 0; int show_all = 0; int cmdmode = 0; + int ret; struct option options[] = { OPT_BOOL('a', "all", &show_all, N_("output all common ancestors")), @@ -186,5 +187,7 @@ int cmd_merge_base(int argc, const char **argv, const char *prefix) ALLOC_ARRAY(rev, argc); while (argc-- > 0) rev[rev_nr++] = get_commit_reference(*argv++); - return show_merge_base(rev, rev_nr, show_all); + ret = show_merge_base(rev, rev_nr, show_all); + free(rev); + return ret; } diff --git a/builtin/merge-recursive.c b/builtin/merge-recursive.c index a4bfd8fc51..b9acbf5d34 100644 --- a/builtin/merge-recursive.c +++ b/builtin/merge-recursive.c @@ -58,7 +58,7 @@ int cmd_merge_recursive(int argc, const char **argv, const char *prefix) "Ignoring 
%s.", "cannot handle more than %d bases. " "Ignoring %s.", - (int)ARRAY_SIZE(bases)-1), + ARRAY_SIZE(bases)-1), (int)ARRAY_SIZE(bases)-1, argv[i]); } if (argc - i != 3) /* "--" "<head>" "<remote>" */ diff --git a/builtin/mktag.c b/builtin/mktag.c index c7b905c614..5d22909122 100644 --- a/builtin/mktag.c +++ b/builtin/mktag.c @@ -61,9 +61,8 @@ static int verify_object_in_tag(struct object_id *tagged_oid, int *tagged_type) type_name(*tagged_type), type_name(type)); repl = lookup_replace_object(the_repository, tagged_oid); - ret = check_object_signature(the_repository, repl, - buffer, size, type_name(*tagged_type), - NULL); + ret = check_object_signature(the_repository, repl, buffer, size, + *tagged_type); free(buffer); return ret; @@ -97,10 +96,10 @@ int cmd_mktag(int argc, const char **argv, const char *prefix) &tagged_oid, &tagged_type)) die(_("tag on stdin did not pass our strict fsck check")); - if (verify_object_in_tag(&tagged_oid, &tagged_type)) + if (verify_object_in_tag(&tagged_oid, &tagged_type) < 0) die(_("tag on stdin did not refer to a valid object")); - if (write_object_file(buf.buf, buf.len, tag_type, &result) < 0) + if (write_object_file(buf.buf, buf.len, OBJ_TAG, &result) < 0) die(_("unable to write tag file")); strbuf_release(&buf); diff --git a/builtin/mktree.c b/builtin/mktree.c index 8bdaada922..902edba6d2 100644 --- a/builtin/mktree.c +++ b/builtin/mktree.c @@ -58,7 +58,7 @@ static void write_tree(struct object_id *oid) strbuf_add(&buf, ent->oid.hash, the_hash_algo->rawsz); } - write_object_file(buf.buf, buf.len, tree_type, oid); + write_object_file(buf.buf, buf.len, OBJ_TREE, oid); strbuf_release(&buf); } diff --git a/builtin/name-rev.c b/builtin/name-rev.c index 929591269d..c59b5699fe 100644 --- a/builtin/name-rev.c +++ b/builtin/name-rev.c @@ -9,6 +9,7 @@ #include "prio-queue.h" #include "hash-lookup.h" #include "commit-slab.h" +#include "commit-graph.h" /* * One day. See the 'name a rev shortly after epoch' test in t6120 when @@ -26,9 +27,58 @@ struct rev_name { define_commit_slab(commit_rev_name, struct rev_name); +static timestamp_t generation_cutoff = GENERATION_NUMBER_INFINITY; static timestamp_t cutoff = TIME_MAX; static struct commit_rev_name rev_names; +/* Disable the cutoff checks entirely */ +static void disable_cutoff(void) +{ + generation_cutoff = 0; + cutoff = 0; +} + +/* Cutoff searching any commits older than this one */ +static void set_commit_cutoff(struct commit *commit) +{ + + if (cutoff > commit->date) + cutoff = commit->date; + + if (generation_cutoff) { + timestamp_t generation = commit_graph_generation(commit); + + if (generation_cutoff > generation) + generation_cutoff = generation; + } +} + +/* adjust the commit date cutoff with a slop to allow for slightly incorrect + * commit timestamps in case of clock skew. + */ +static void adjust_cutoff_timestamp_for_slop(void) +{ + if (cutoff) { + /* check for undeflow */ + if (cutoff > TIME_MIN + CUTOFF_DATE_SLOP) + cutoff = cutoff - CUTOFF_DATE_SLOP; + else + cutoff = TIME_MIN; + } +} + +/* Check if a commit is before the cutoff. Prioritize generation numbers + * first, but use the commit timestamp if we lack generation data. + */ +static int commit_is_before_cutoff(struct commit *commit) +{ + if (generation_cutoff < GENERATION_NUMBER_INFINITY) + return generation_cutoff && + commit_graph_generation(commit) < generation_cutoff; + + return commit->date < cutoff; +} + /* How many generations are maximally preferred over _one_ merge traversal? 
*/ #define MERGE_TRAVERSAL_WEIGHT 65535 @@ -151,7 +201,7 @@ static void name_rev(struct commit *start_commit, struct rev_name *start_name; parse_commit(start_commit); - if (start_commit->date < cutoff) + if (commit_is_before_cutoff(start_commit)) return; start_name = create_or_update_name(start_commit, taggerdate, 0, 0, @@ -181,7 +231,7 @@ static void name_rev(struct commit *start_commit, int generation, distance; parse_commit(parent); - if (parent->date < cutoff) + if (commit_is_before_cutoff(parent)) continue; if (parent_number > 1) { @@ -568,7 +618,7 @@ int cmd_name_rev(int argc, const char **argv, const char *prefix) usage_with_options(name_rev_usage, opts); } if (all || annotate_stdin) - cutoff = 0; + disable_cutoff(); for (; argc; argc--, argv++) { struct object_id oid; @@ -596,10 +646,8 @@ int cmd_name_rev(int argc, const char **argv, const char *prefix) continue; } - if (commit) { - if (cutoff > commit->date) - cutoff = commit->date; - } + if (commit) + set_commit_cutoff(commit); if (peel_tag) { if (!commit) { @@ -612,13 +660,8 @@ int cmd_name_rev(int argc, const char **argv, const char *prefix) add_object_array(object, *argv, &revs); } - if (cutoff) { - /* check for undeflow */ - if (cutoff > TIME_MIN + CUTOFF_DATE_SLOP) - cutoff = cutoff - CUTOFF_DATE_SLOP; - else - cutoff = TIME_MIN; - } + adjust_cutoff_timestamp_for_slop(); + for_each_ref(name_ref, &data); name_tips(); diff --git a/builtin/notes.c b/builtin/notes.c index f99593a185..a3d0d15a22 100644 --- a/builtin/notes.c +++ b/builtin/notes.c @@ -199,7 +199,7 @@ static void prepare_note_data(const struct object_id *object, struct note_data * static void write_note_data(struct note_data *d, struct object_id *oid) { - if (write_object_file(d->buf.buf, d->buf.len, blob_type, oid)) { + if (write_object_file(d->buf.buf, d->buf.len, OBJ_BLOB, oid)) { int status = die_message(_("unable to write note object")); if (d->edit_path) diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c index 178e611f09..c23b788ac8 100644 --- a/builtin/pack-objects.c +++ b/builtin/pack-objects.c @@ -1199,16 +1199,26 @@ static void write_pack_file(void) display_progress(progress_state, written); } - /* - * Did we write the wrong # entries in the header? - * If so, rewrite it like in fast-import - */ if (pack_to_stdout) { - finalize_hashfile(f, hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE); + /* + * We never fsync when writing to stdout since we may + * not be writing to an actual pack file. For instance, + * the upload-pack code passes a pipe here. Calling + * fsync on a pipe results in unnecessary + * synchronization with the reader on some platforms. + */ + finalize_hashfile(f, hash, FSYNC_COMPONENT_NONE, + CSUM_HASH_IN_STREAM | CSUM_CLOSE); } else if (nr_written == nr_remaining) { - finalize_hashfile(f, hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); + finalize_hashfile(f, hash, FSYNC_COMPONENT_PACK, + CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); } else { - int fd = finalize_hashfile(f, hash, 0); + /* + * If we wrote the wrong number of entries in the + * header, rewrite it like in fast-import. 
+ */ + + int fd = finalize_hashfile(f, hash, FSYNC_COMPONENT_PACK, 0); fixup_pack_header_footer(fd, hash, pack_tmp_name, nr_written, hash, offset); close(fd); @@ -1802,7 +1812,7 @@ static void add_preferred_base(struct object_id *oid) return; data = read_object_with_reference(the_repository, oid, - tree_type, &size, &tree_oid); + OBJ_TREE, &size, &tree_oid); if (!data) return; @@ -3651,7 +3661,7 @@ static int pack_options_allow_reuse(void) static int get_object_list_from_bitmap(struct rev_info *revs) { - if (!(bitmap_git = prepare_bitmap_walk(revs, &filter_options, 0))) + if (!(bitmap_git = prepare_bitmap_walk(revs, 0))) return -1; if (pack_options_allow_reuse() && @@ -3727,6 +3737,7 @@ static void get_object_list(int ac, const char **av) repo_init_revisions(the_repository, &revs, NULL); save_commit_buffer = 0; setup_revisions(ac, av, &revs, &s_r_opt); + list_objects_filter_copy(&revs.filter, &filter_options); /* make sure shallows are read */ is_repository_shallow(the_repository); @@ -3777,9 +3788,9 @@ static void get_object_list(int ac, const char **av) if (!fn_show_object) fn_show_object = show_object; - traverse_commit_list_filtered(&filter_options, &revs, - show_commit, fn_show_object, NULL, - NULL); + traverse_commit_list(&revs, + show_commit, fn_show_object, + NULL); if (unpack_unreachable_expiration) { revs.ignore_missing_links = 1; diff --git a/builtin/read-tree.c b/builtin/read-tree.c index 2109c4c9e5..9f1f33e954 100644 --- a/builtin/read-tree.c +++ b/builtin/read-tree.c @@ -160,15 +160,22 @@ int cmd_read_tree(int argc, const char **argv, const char *cmd_prefix) argc = parse_options(argc, argv, cmd_prefix, read_tree_options, read_tree_usage, 0); - hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR); - prefix_set = opts.prefix ? 1 : 0; if (1 < opts.merge + opts.reset + prefix_set) die("Which one? 
-m, --reset, or --prefix?"); + /* Prefix should not start with a directory separator */ + if (opts.prefix && opts.prefix[0] == '/') + die("Invalid prefix, prefix cannot start with '/'"); + if (opts.reset) opts.reset = UNPACK_RESET_OVERWRITE_UNTRACKED; + prepare_repo_settings(the_repository); + the_repository->settings.command_requires_full_index = 0; + + hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR); + /* * NEEDSWORK * @@ -210,6 +217,9 @@ int cmd_read_tree(int argc, const char **argv, const char *cmd_prefix) if (opts.merge && !opts.index_only) setup_work_tree(); + if (opts.skip_sparse_checkout) + ensure_full_index(&the_index); + if (opts.merge) { switch (stage - 1) { case 0: diff --git a/builtin/rebase.c b/builtin/rebase.c index b29ad2b65e..27fde7bf28 100644 --- a/builtin/rebase.c +++ b/builtin/rebase.c @@ -829,6 +829,8 @@ static int checkout_up_to_date(struct rebase_options *options) ropts.oid = &options->orig_head; ropts.branch = options->head_name; ropts.flags = RESET_HEAD_RUN_POST_CHECKOUT_HOOK; + if (!ropts.branch) + ropts.flags |= RESET_HEAD_DETACH; ropts.head_msg = buf.buf; if (reset_head(the_repository, &ropts) < 0) ret = error(_("could not switch to %s"), options->switch_to); diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c index d4db577669..9aabffa1af 100644 --- a/builtin/receive-pack.c +++ b/builtin/receive-pack.c @@ -749,7 +749,7 @@ static void prepare_push_cert_sha1(struct child_process *proc) int bogs /* beginning_of_gpg_sig */; already_done = 1; - if (write_object_file(push_cert.buf, push_cert.len, "blob", + if (write_object_file(push_cert.buf, push_cert.len, OBJ_BLOB, &push_cert_oid)) oidclr(&push_cert_oid); @@ -813,13 +813,14 @@ static int run_and_feed_hook(const char *hook_name, feed_fn feed, proc.trace2_hook_name = hook_name; if (feed_state->push_options) { - int i; + size_t i; for (i = 0; i < feed_state->push_options->nr; i++) strvec_pushf(&proc.env_array, - "GIT_PUSH_OPTION_%d=%s", i, + "GIT_PUSH_OPTION_%"PRIuMAX"=%s", + (uintmax_t)i, feed_state->push_options->items[i].string); - strvec_pushf(&proc.env_array, "GIT_PUSH_OPTION_COUNT=%d", - feed_state->push_options->nr); + strvec_pushf(&proc.env_array, "GIT_PUSH_OPTION_COUNT=%"PRIuMAX"", + (uintmax_t)feed_state->push_options->nr); } else strvec_pushf(&proc.env_array, "GIT_PUSH_OPTION_COUNT"); diff --git a/builtin/reflog.c b/builtin/reflog.c index 016466852f..9407f835cb 100644 --- a/builtin/reflog.c +++ b/builtin/reflog.c @@ -1,16 +1,9 @@ #include "builtin.h" #include "config.h" -#include "lockfile.h" -#include "object-store.h" -#include "repository.h" -#include "commit.h" -#include "refs.h" -#include "dir.h" -#include "tree-walk.h" -#include "diff.h" #include "revision.h" #include "reachable.h" #include "worktree.h" +#include "reflog.h" static const char reflog_exists_usage[] = N_("git reflog exists <ref>"); @@ -18,404 +11,11 @@ N_("git reflog exists <ref>"); static timestamp_t default_reflog_expire; static timestamp_t default_reflog_expire_unreachable; -struct cmd_reflog_expire_cb { - int stalefix; - int explicit_expiry; - timestamp_t expire_total; - timestamp_t expire_unreachable; - int recno; -}; - -struct expire_reflog_policy_cb { - enum { - UE_NORMAL, - UE_ALWAYS, - UE_HEAD - } unreachable_expire_kind; - struct commit_list *mark_list; - unsigned long mark_limit; - struct cmd_reflog_expire_cb cmd; - struct commit *tip_commit; - struct commit_list *tips; - unsigned int dry_run:1; -}; - struct worktree_reflogs { struct worktree *worktree; struct string_list reflogs; }; -/* Remember to update object flag 
allocation in object.h */ -#define INCOMPLETE (1u<<10) -#define STUDYING (1u<<11) -#define REACHABLE (1u<<12) - -static int tree_is_complete(const struct object_id *oid) -{ - struct tree_desc desc; - struct name_entry entry; - int complete; - struct tree *tree; - - tree = lookup_tree(the_repository, oid); - if (!tree) - return 0; - if (tree->object.flags & SEEN) - return 1; - if (tree->object.flags & INCOMPLETE) - return 0; - - if (!tree->buffer) { - enum object_type type; - unsigned long size; - void *data = read_object_file(oid, &type, &size); - if (!data) { - tree->object.flags |= INCOMPLETE; - return 0; - } - tree->buffer = data; - tree->size = size; - } - init_tree_desc(&desc, tree->buffer, tree->size); - complete = 1; - while (tree_entry(&desc, &entry)) { - if (!has_object_file(&entry.oid) || - (S_ISDIR(entry.mode) && !tree_is_complete(&entry.oid))) { - tree->object.flags |= INCOMPLETE; - complete = 0; - } - } - free_tree_buffer(tree); - - if (complete) - tree->object.flags |= SEEN; - return complete; -} - -static int commit_is_complete(struct commit *commit) -{ - struct object_array study; - struct object_array found; - int is_incomplete = 0; - int i; - - /* early return */ - if (commit->object.flags & SEEN) - return 1; - if (commit->object.flags & INCOMPLETE) - return 0; - /* - * Find all commits that are reachable and are not marked as - * SEEN. Then make sure the trees and blobs contained are - * complete. After that, mark these commits also as SEEN. - * If some of the objects that are needed to complete this - * commit are missing, mark this commit as INCOMPLETE. - */ - memset(&study, 0, sizeof(study)); - memset(&found, 0, sizeof(found)); - add_object_array(&commit->object, NULL, &study); - add_object_array(&commit->object, NULL, &found); - commit->object.flags |= STUDYING; - while (study.nr) { - struct commit *c; - struct commit_list *parent; - - c = (struct commit *)object_array_pop(&study); - if (!c->object.parsed && !parse_object(the_repository, &c->object.oid)) - c->object.flags |= INCOMPLETE; - - if (c->object.flags & INCOMPLETE) { - is_incomplete = 1; - break; - } - else if (c->object.flags & SEEN) - continue; - for (parent = c->parents; parent; parent = parent->next) { - struct commit *p = parent->item; - if (p->object.flags & STUDYING) - continue; - p->object.flags |= STUDYING; - add_object_array(&p->object, NULL, &study); - add_object_array(&p->object, NULL, &found); - } - } - if (!is_incomplete) { - /* - * make sure all commits in "found" array have all the - * necessary objects. - */ - for (i = 0; i < found.nr; i++) { - struct commit *c = - (struct commit *)found.objects[i].item; - if (!tree_is_complete(get_commit_tree_oid(c))) { - is_incomplete = 1; - c->object.flags |= INCOMPLETE; - } - } - if (!is_incomplete) { - /* mark all found commits as complete, iow SEEN */ - for (i = 0; i < found.nr; i++) - found.objects[i].item->flags |= SEEN; - } - } - /* clear flags from the objects we traversed */ - for (i = 0; i < found.nr; i++) - found.objects[i].item->flags &= ~STUDYING; - if (is_incomplete) - commit->object.flags |= INCOMPLETE; - else { - /* - * If we come here, we have (1) traversed the ancestry chain - * from the "commit" until we reach SEEN commits (which are - * known to be complete), and (2) made sure that the commits - * encountered during the above traversal refer to trees that - * are complete. Which means that we know *all* the commits - * we have seen during this process are complete. 
- */ - for (i = 0; i < found.nr; i++) - found.objects[i].item->flags |= SEEN; - } - /* free object arrays */ - object_array_clear(&study); - object_array_clear(&found); - return !is_incomplete; -} - -static int keep_entry(struct commit **it, struct object_id *oid) -{ - struct commit *commit; - - if (is_null_oid(oid)) - return 1; - commit = lookup_commit_reference_gently(the_repository, oid, 1); - if (!commit) - return 0; - - /* - * Make sure everything in this commit exists. - * - * We have walked all the objects reachable from the refs - * and cache earlier. The commits reachable by this commit - * must meet SEEN commits -- and then we should mark them as - * SEEN as well. - */ - if (!commit_is_complete(commit)) - return 0; - *it = commit; - return 1; -} - -/* - * Starting from commits in the cb->mark_list, mark commits that are - * reachable from them. Stop the traversal at commits older than - * the expire_limit and queue them back, so that the caller can call - * us again to restart the traversal with longer expire_limit. - */ -static void mark_reachable(struct expire_reflog_policy_cb *cb) -{ - struct commit_list *pending; - timestamp_t expire_limit = cb->mark_limit; - struct commit_list *leftover = NULL; - - for (pending = cb->mark_list; pending; pending = pending->next) - pending->item->object.flags &= ~REACHABLE; - - pending = cb->mark_list; - while (pending) { - struct commit_list *parent; - struct commit *commit = pop_commit(&pending); - if (commit->object.flags & REACHABLE) - continue; - if (parse_commit(commit)) - continue; - commit->object.flags |= REACHABLE; - if (commit->date < expire_limit) { - commit_list_insert(commit, &leftover); - continue; - } - commit->object.flags |= REACHABLE; - parent = commit->parents; - while (parent) { - commit = parent->item; - parent = parent->next; - if (commit->object.flags & REACHABLE) - continue; - commit_list_insert(commit, &pending); - } - } - cb->mark_list = leftover; -} - -static int unreachable(struct expire_reflog_policy_cb *cb, struct commit *commit, struct object_id *oid) -{ - /* - * We may or may not have the commit yet - if not, look it - * up using the supplied sha1. - */ - if (!commit) { - if (is_null_oid(oid)) - return 0; - - commit = lookup_commit_reference_gently(the_repository, oid, - 1); - - /* Not a commit -- keep it */ - if (!commit) - return 0; - } - - /* Reachable from the current ref? Don't prune. */ - if (commit->object.flags & REACHABLE) - return 0; - - if (cb->mark_list && cb->mark_limit) { - cb->mark_limit = 0; /* dig down to the root */ - mark_reachable(cb); - } - - return !(commit->object.flags & REACHABLE); -} - -/* - * Return true iff the specified reflog entry should be expired. 
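
The policy machinery being deleted here (tree-completeness checks, reachability marking, should_expire_reflog_ent() and friends) is not dropped outright: the top of this file now includes the new "reflog.h", and later hunks in this patch call a shared reflog_delete() helper from both "git reflog delete" and "git stash drop". As a rough caller-side sketch only, with the prototype inferred from those call sites rather than taken from reflog.h itself:

	#include "builtin.h"
	#include "reflog.h"

	/*
	 * Sketch, not part of the patch: drop one reflog entry in-process
	 * via the new shared helper instead of open-coding the expiry
	 * policy.  The flag names come from the hunks in this file and in
	 * builtin/stash.c.
	 */
	static int drop_one_entry(const char *rev)
	{
		unsigned int flags = EXPIRE_REFLOGS_REWRITE |
				     EXPIRE_REFLOGS_UPDATE_REF;

		if (reflog_delete(rev, flags, 0 /* not verbose */))
			return error(_("could not delete reflog entry '%s'"), rev);
		return 0;
	}
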
- */ -static int should_expire_reflog_ent(struct object_id *ooid, struct object_id *noid, - const char *email, timestamp_t timestamp, int tz, - const char *message, void *cb_data) -{ - struct expire_reflog_policy_cb *cb = cb_data; - struct commit *old_commit, *new_commit; - - if (timestamp < cb->cmd.expire_total) - return 1; - - old_commit = new_commit = NULL; - if (cb->cmd.stalefix && - (!keep_entry(&old_commit, ooid) || !keep_entry(&new_commit, noid))) - return 1; - - if (timestamp < cb->cmd.expire_unreachable) { - switch (cb->unreachable_expire_kind) { - case UE_ALWAYS: - return 1; - case UE_NORMAL: - case UE_HEAD: - if (unreachable(cb, old_commit, ooid) || unreachable(cb, new_commit, noid)) - return 1; - break; - } - } - - if (cb->cmd.recno && --(cb->cmd.recno) == 0) - return 1; - - return 0; -} - -static int should_expire_reflog_ent_verbose(struct object_id *ooid, - struct object_id *noid, - const char *email, - timestamp_t timestamp, int tz, - const char *message, void *cb_data) -{ - struct expire_reflog_policy_cb *cb = cb_data; - int expire; - - expire = should_expire_reflog_ent(ooid, noid, email, timestamp, tz, - message, cb); - - if (!expire) - printf("keep %s", message); - else if (cb->dry_run) - printf("would prune %s", message); - else - printf("prune %s", message); - - return expire; -} - -static int push_tip_to_list(const char *refname, const struct object_id *oid, - int flags, void *cb_data) -{ - struct commit_list **list = cb_data; - struct commit *tip_commit; - if (flags & REF_ISSYMREF) - return 0; - tip_commit = lookup_commit_reference_gently(the_repository, oid, 1); - if (!tip_commit) - return 0; - commit_list_insert(tip_commit, list); - return 0; -} - -static int is_head(const char *refname) -{ - switch (ref_type(refname)) { - case REF_TYPE_OTHER_PSEUDOREF: - case REF_TYPE_MAIN_PSEUDOREF: - if (parse_worktree_ref(refname, NULL, NULL, &refname)) - BUG("not a worktree ref: %s", refname); - break; - default: - break; - } - return !strcmp(refname, "HEAD"); -} - -static void reflog_expiry_prepare(const char *refname, - const struct object_id *oid, - void *cb_data) -{ - struct expire_reflog_policy_cb *cb = cb_data; - struct commit_list *elem; - struct commit *commit = NULL; - - if (!cb->cmd.expire_unreachable || is_head(refname)) { - cb->unreachable_expire_kind = UE_HEAD; - } else { - commit = lookup_commit(the_repository, oid); - cb->unreachable_expire_kind = commit ? 
UE_NORMAL : UE_ALWAYS; - } - - if (cb->cmd.expire_unreachable <= cb->cmd.expire_total) - cb->unreachable_expire_kind = UE_ALWAYS; - - switch (cb->unreachable_expire_kind) { - case UE_ALWAYS: - return; - case UE_HEAD: - for_each_ref(push_tip_to_list, &cb->tips); - for (elem = cb->tips; elem; elem = elem->next) - commit_list_insert(elem->item, &cb->mark_list); - break; - case UE_NORMAL: - commit_list_insert(commit, &cb->mark_list); - /* For reflog_expiry_cleanup() below */ - cb->tip_commit = commit; - } - cb->mark_limit = cb->cmd.expire_total; - mark_reachable(cb); -} - -static void reflog_expiry_cleanup(void *cb_data) -{ - struct expire_reflog_policy_cb *cb = cb_data; - struct commit_list *elem; - - switch (cb->unreachable_expire_kind) { - case UE_ALWAYS: - return; - case UE_HEAD: - for (elem = cb->tips; elem; elem = elem->next) - clear_commit_marks(elem->item, REACHABLE); - free_commit_list(cb->tips); - break; - case UE_NORMAL: - clear_commit_marks(cb->tip_commit, REACHABLE); - break; - } -} - static int collect_reflog(const char *ref, const struct object_id *oid, int unused, void *cb_data) { struct worktree_reflogs *cb = cb_data; @@ -704,16 +304,6 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix) return status; } -static int count_reflog_ent(struct object_id *ooid, struct object_id *noid, - const char *email, timestamp_t timestamp, int tz, - const char *message, void *cb_data) -{ - struct cmd_reflog_expire_cb *cb = cb_data; - if (!cb->expire_total || timestamp < cb->expire_total) - cb->recno++; - return 0; -} - static const char * reflog_delete_usage[] = { N_("git reflog delete [--rewrite] [--updateref] " "[--dry-run | -n] [--verbose] <refs>..."), @@ -722,11 +312,10 @@ static const char * reflog_delete_usage[] = { static int cmd_reflog_delete(int argc, const char **argv, const char *prefix) { - struct cmd_reflog_expire_cb cmd = { 0 }; int i, status = 0; unsigned int flags = 0; int verbose = 0; - reflog_expiry_should_prune_fn *should_prune_fn = should_expire_reflog_ent; + const struct option options[] = { OPT_BIT(0, "dry-run", &flags, N_("do not actually prune any entries"), EXPIRE_REFLOGS_DRY_RUN), @@ -742,48 +331,12 @@ static int cmd_reflog_delete(int argc, const char **argv, const char *prefix) argc = parse_options(argc, argv, prefix, options, reflog_delete_usage, 0); - if (verbose) - should_prune_fn = should_expire_reflog_ent_verbose; - if (argc < 1) return error(_("no reflog specified to delete")); - for (i = 0; i < argc; i++) { - const char *spec = strstr(argv[i], "@{"); - char *ep, *ref; - int recno; - struct expire_reflog_policy_cb cb = { - .dry_run = !!(flags & EXPIRE_REFLOGS_DRY_RUN), - }; - - if (!spec) { - status |= error(_("not a reflog: %s"), argv[i]); - continue; - } + for (i = 0; i < argc; i++) + status |= reflog_delete(argv[i], flags, verbose); - if (!dwim_log(argv[i], spec - argv[i], NULL, &ref)) { - status |= error(_("no reflog for '%s'"), argv[i]); - continue; - } - - recno = strtoul(spec + 2, &ep, 10); - if (*ep == '}') { - cmd.recno = -recno; - for_each_reflog_ent(ref, count_reflog_ent, &cmd); - } else { - cmd.expire_total = approxidate(spec + 2); - for_each_reflog_ent(ref, count_reflog_ent, &cmd); - cmd.expire_total = 0; - } - - cb.cmd = cmd; - status |= reflog_expire(ref, flags, - reflog_expiry_prepare, - should_prune_fn, - reflog_expiry_cleanup, - &cb); - free(ref); - } return status; } diff --git a/builtin/remote.c b/builtin/remote.c index 6f27ddc47b..5f4cde9d78 100644 --- a/builtin/remote.c +++ b/builtin/remote.c @@ -12,11 +12,12 @@ 
#include "object-store.h" #include "strvec.h" #include "commit-reach.h" +#include "progress.h" static const char * const builtin_remote_usage[] = { "git remote [-v | --verbose]", N_("git remote add [-t <branch>] [-m <master>] [-f] [--tags | --no-tags] [--mirror=<fetch|push>] <name> <url>"), - N_("git remote rename <old> <new>"), + N_("git remote rename [--[no-]progress] <old> <new>"), N_("git remote remove <name>"), N_("git remote set-head <name> (-a | --auto | -d | --delete | <branch>)"), N_("git remote [-v | --verbose] show [-n] <name>"), @@ -36,7 +37,7 @@ static const char * const builtin_remote_add_usage[] = { }; static const char * const builtin_remote_rename_usage[] = { - N_("git remote rename <old> <new>"), + N_("git remote rename [--[no-]progress] <old> <new>"), NULL }; @@ -571,6 +572,7 @@ struct rename_info { const char *old_name; const char *new_name; struct string_list *remote_branches; + uint32_t symrefs_nr; }; static int read_remote_branches(const char *refname, @@ -587,10 +589,12 @@ static int read_remote_branches(const char *refname, item = string_list_append(rename->remote_branches, refname); symref = resolve_ref_unsafe(refname, RESOLVE_REF_READING, NULL, &flag); - if (symref && (flag & REF_ISSYMREF)) + if (symref && (flag & REF_ISSYMREF)) { item->util = xstrdup(symref); - else + rename->symrefs_nr++; + } else { item->util = NULL; + } } strbuf_release(&buf); @@ -674,7 +678,9 @@ static void handle_push_default(const char* old_name, const char* new_name) static int mv(int argc, const char **argv) { + int show_progress = isatty(2); struct option options[] = { + OPT_BOOL(0, "progress", &show_progress, N_("force progress reporting")), OPT_END() }; struct remote *oldremote, *newremote; @@ -682,14 +688,19 @@ static int mv(int argc, const char **argv) old_remote_context = STRBUF_INIT; struct string_list remote_branches = STRING_LIST_INIT_DUP; struct rename_info rename; - int i, refspec_updated = 0; + int i, refs_renamed_nr = 0, refspec_updated = 0; + struct progress *progress = NULL; + + argc = parse_options(argc, argv, NULL, options, + builtin_remote_rename_usage, 0); - if (argc != 3) + if (argc != 2) usage_with_options(builtin_remote_rename_usage, options); - rename.old_name = argv[1]; - rename.new_name = argv[2]; + rename.old_name = argv[0]; + rename.new_name = argv[1]; rename.remote_branches = &remote_branches; + rename.symrefs_nr = 0; oldremote = remote_get(rename.old_name); if (!remote_is_configured(oldremote, 1)) { @@ -764,15 +775,26 @@ static int mv(int argc, const char **argv) * the new symrefs. */ for_each_ref(read_remote_branches, &rename); + if (show_progress) { + /* + * Count symrefs twice, since "renaming" them is done by + * deleting and recreating them in two separate passes. 
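
The total passed to start_progress() below is remote_branches->nr plus symrefs_nr because a plain ref costs one unit of work (its rename) while a symref costs two: it is deleted in one pass and recreated with create_symref() in a later one. The surrounding code uses the usual progress.h start/display/stop idiom; a condensed sketch of that pattern, where process_ref() is only a stand-in for the real delete/rename/recreate steps:

	#include "cache.h"
	#include "progress.h"

	static void process_ref(uint64_t nr);	/* stand-in for the real work */

	static void process_with_progress(uint64_t total, int show_progress)
	{
		struct progress *progress = NULL;
		uint64_t i, done = 0;

		if (show_progress)
			progress = start_progress(_("Renaming remote references"), total);
		for (i = 0; i < total; i++) {
			process_ref(i);
			display_progress(progress, ++done);	/* NULL-safe when quiet */
		}
		stop_progress(&progress);
	}
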
+ */ + progress = start_progress(_("Renaming remote references"), + rename.remote_branches->nr + rename.symrefs_nr); + } for (i = 0; i < remote_branches.nr; i++) { struct string_list_item *item = remote_branches.items + i; - int flag = 0; + struct strbuf referent = STRBUF_INIT; - read_ref_full(item->string, RESOLVE_REF_READING, NULL, &flag); - if (!(flag & REF_ISSYMREF)) + if (refs_read_symbolic_ref(get_main_ref_store(the_repository), item->string, + &referent)) continue; if (delete_ref(NULL, item->string, NULL, REF_NO_DEREF)) die(_("deleting '%s' failed"), item->string); + + strbuf_release(&referent); + display_progress(progress, ++refs_renamed_nr); } for (i = 0; i < remote_branches.nr; i++) { struct string_list_item *item = remote_branches.items + i; @@ -788,6 +810,7 @@ static int mv(int argc, const char **argv) item->string, buf.buf); if (rename_ref(item->string, buf.buf, buf2.buf)) die(_("renaming '%s' failed"), item->string); + display_progress(progress, ++refs_renamed_nr); } for (i = 0; i < remote_branches.nr; i++) { struct string_list_item *item = remote_branches.items + i; @@ -807,7 +830,9 @@ static int mv(int argc, const char **argv) item->string, buf.buf); if (create_symref(buf.buf, buf2.buf, buf3.buf)) die(_("creating '%s' failed"), buf.buf); + display_progress(progress, ++refs_renamed_nr); } + stop_progress(&progress); string_list_clear(&remote_branches, 1); handle_push_default(rename.old_name, rename.new_name); diff --git a/builtin/repack.c b/builtin/repack.c index da1e364a75..d1a563d5b6 100644 --- a/builtin/repack.c +++ b/builtin/repack.c @@ -22,6 +22,7 @@ static int delta_base_offset = 1; static int pack_kept_objects = -1; static int write_bitmaps = -1; static int use_delta_islands; +static int run_update_server_info = 1; static char *packdir, *packtmp_name, *packtmp; static const char *const git_repack_usage[] = { @@ -54,6 +55,10 @@ static int repack_config(const char *var, const char *value, void *cb) use_delta_islands = git_config_bool(var, value); return 0; } + if (strcmp(var, "repack.updateserverinfo") == 0) { + run_update_server_info = git_config_bool(var, value); + return 0; + } return git_default_config(var, value, cb); } @@ -620,7 +625,6 @@ int cmd_repack(int argc, const char **argv, const char *prefix) const char *unpack_unreachable = NULL; int keep_unreachable = 0; struct string_list keep_pack_list = STRING_LIST_INIT_NODUP; - int no_update_server_info = 0; struct pack_objects_args po_args = {NULL}; int geometric_factor = 0; int write_midx = 0; @@ -637,8 +641,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix) N_("pass --no-reuse-delta to git-pack-objects")), OPT_BOOL('F', NULL, &po_args.no_reuse_object, N_("pass --no-reuse-object to git-pack-objects")), - OPT_BOOL('n', NULL, &no_update_server_info, - N_("do not run git-update-server-info")), + OPT_NEGBIT('n', NULL, &run_update_server_info, + N_("do not run git-update-server-info"), 1), OPT__QUIET(&po_args.quiet, N_("be quiet")), OPT_BOOL('l', "local", &po_args.local, N_("pass --local to git-pack-objects")), @@ -939,7 +943,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix) prune_shallow(PRUNE_QUICK); } - if (!no_update_server_info) + if (run_update_server_info) update_server_info(0); remove_temporary_files(); diff --git a/builtin/replace.c b/builtin/replace.c index ac92337c0e..5068f4f0b2 100644 --- a/builtin/replace.c +++ b/builtin/replace.c @@ -409,7 +409,7 @@ static int check_one_mergetag(struct commit *commit, int i; hash_object_file(the_hash_algo, extra->value, extra->len, - 
type_name(OBJ_TAG), &tag_oid); + OBJ_TAG, &tag_oid); tag = lookup_tag(the_repository, &tag_oid); if (!tag) return error(_("bad mergetag in commit '%s'"), ref); @@ -474,7 +474,7 @@ static int create_graft(int argc, const char **argv, int force, int gentle) return -1; } - if (write_object_file(buf.buf, buf.len, commit_type, &new_oid)) { + if (write_object_file(buf.buf, buf.len, OBJ_COMMIT, &new_oid)) { strbuf_release(&buf); return error(_("could not write replacement commit for: '%s'"), old_ref); diff --git a/builtin/rev-list.c b/builtin/rev-list.c index 38528c7f15..572da1472e 100644 --- a/builtin/rev-list.c +++ b/builtin/rev-list.c @@ -62,7 +62,6 @@ static const char rev_list_usage[] = static struct progress *progress; static unsigned progress_counter; -static struct list_objects_filter_options filter_options; static struct oidset omitted_objects; static int arg_print_omitted; /* print objects omitted by filter */ @@ -400,7 +399,6 @@ static inline int parse_missing_action_value(const char *value) } static int try_bitmap_count(struct rev_info *revs, - struct list_objects_filter_options *filter, int filter_provided_objects) { uint32_t commit_count = 0, @@ -436,7 +434,7 @@ static int try_bitmap_count(struct rev_info *revs, */ max_count = revs->max_count; - bitmap_git = prepare_bitmap_walk(revs, filter, filter_provided_objects); + bitmap_git = prepare_bitmap_walk(revs, filter_provided_objects); if (!bitmap_git) return -1; @@ -453,7 +451,6 @@ static int try_bitmap_count(struct rev_info *revs, } static int try_bitmap_traversal(struct rev_info *revs, - struct list_objects_filter_options *filter, int filter_provided_objects) { struct bitmap_index *bitmap_git; @@ -465,7 +462,7 @@ static int try_bitmap_traversal(struct rev_info *revs, if (revs->max_count >= 0) return -1; - bitmap_git = prepare_bitmap_walk(revs, filter, filter_provided_objects); + bitmap_git = prepare_bitmap_walk(revs, filter_provided_objects); if (!bitmap_git) return -1; @@ -475,7 +472,6 @@ static int try_bitmap_traversal(struct rev_info *revs, } static int try_bitmap_disk_usage(struct rev_info *revs, - struct list_objects_filter_options *filter, int filter_provided_objects) { struct bitmap_index *bitmap_git; @@ -483,7 +479,7 @@ static int try_bitmap_disk_usage(struct rev_info *revs, if (!show_disk_usage) return -1; - bitmap_git = prepare_bitmap_walk(revs, filter, filter_provided_objects); + bitmap_git = prepare_bitmap_walk(revs, filter_provided_objects); if (!bitmap_git) return -1; @@ -595,17 +591,6 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix) show_progress = arg; continue; } - - if (skip_prefix(arg, ("--" CL_ARG__FILTER "="), &arg)) { - parse_list_objects_filter(&filter_options, arg); - if (filter_options.choice && !revs.blob_objects) - die(_("object filtering requires --objects")); - continue; - } - if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) { - list_objects_filter_set_no_filter(&filter_options); - continue; - } if (!strcmp(arg, "--filter-provided-objects")) { filter_provided_objects = 1; continue; @@ -688,11 +673,11 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix) progress = start_delayed_progress(show_progress, 0); if (use_bitmap_index) { - if (!try_bitmap_count(&revs, &filter_options, filter_provided_objects)) + if (!try_bitmap_count(&revs, filter_provided_objects)) return 0; - if (!try_bitmap_disk_usage(&revs, &filter_options, filter_provided_objects)) + if (!try_bitmap_disk_usage(&revs, filter_provided_objects)) return 0; - if (!try_bitmap_traversal(&revs, &filter_options, 
filter_provided_objects)) + if (!try_bitmap_traversal(&revs, filter_provided_objects)) return 0; } @@ -733,7 +718,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix) oidset_init(&missing_objects, DEFAULT_OIDSET_SIZE); traverse_commit_list_filtered( - &filter_options, &revs, show_commit, show_object, &info, + &revs, show_commit, show_object, &info, (arg_print_omitted ? &omitted_objects : NULL)); if (arg_print_omitted) { diff --git a/builtin/shortlog.c b/builtin/shortlog.c index 228d782754..26c5c0cf93 100644 --- a/builtin/shortlog.c +++ b/builtin/shortlog.c @@ -435,7 +435,7 @@ static void add_wrapped_shortlog_msg(struct strbuf *sb, const char *s, void shortlog_output(struct shortlog *log) { - int i, j; + size_t i, j; struct strbuf sb = STRBUF_INIT; if (log->sort_by_number) @@ -448,10 +448,10 @@ void shortlog_output(struct shortlog *log) (int)UTIL_TO_INT(item), item->string); } else { struct string_list *onelines = item->util; - fprintf(log->file, "%s (%d):\n", - item->string, onelines->nr); - for (j = onelines->nr - 1; j >= 0; j--) { - const char *msg = onelines->items[j].string; + fprintf(log->file, "%s (%"PRIuMAX"):\n", + item->string, (uintmax_t)onelines->nr); + for (j = onelines->nr; j >= 1; j--) { + const char *msg = onelines->items[j - 1].string; if (log->wrap_lines) { strbuf_reset(&sb); diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c index 5518ed47f6..0217d44c5b 100644 --- a/builtin/sparse-checkout.c +++ b/builtin/sparse-checkout.c @@ -8,7 +8,6 @@ #include "run-command.h" #include "strbuf.h" #include "string-list.h" -#include "cache.h" #include "cache-tree.h" #include "lockfile.h" #include "resolve-undo.h" @@ -329,11 +328,11 @@ static int write_patterns_and_update(struct pattern_list *pl) fd = hold_lock_file_for_update(&lk, sparse_filename, LOCK_DIE_ON_ERROR); + free(sparse_filename); result = update_working_directory(pl); if (result) { rollback_lock_file(&lk); - free(sparse_filename); clear_pattern_list(pl); update_working_directory(NULL); return result; @@ -349,7 +348,6 @@ static int write_patterns_and_update(struct pattern_list *pl) fflush(fp); commit_lock_file(&lk); - free(sparse_filename); clear_pattern_list(pl); return 0; diff --git a/builtin/stash.c b/builtin/stash.c index 3e8af210fd..ccdfdab44b 100644 --- a/builtin/stash.c +++ b/builtin/stash.c @@ -16,7 +16,7 @@ #include "log-tree.h" #include "diffcore.h" #include "exec-cmd.h" -#include "entry.h" +#include "reflog.h" #define INCLUDE_ALL_FILES 2 @@ -634,20 +634,9 @@ static int reflog_is_empty(const char *refname) static int do_drop_stash(struct stash_info *info, int quiet) { - int ret; - struct child_process cp_reflog = CHILD_PROCESS_INIT; - - /* - * reflog does not provide a simple function for deleting refs. 
One will - * need to be added to avoid implementing too much reflog code here - */ - - cp_reflog.git_cmd = 1; - strvec_pushl(&cp_reflog.args, "reflog", "delete", "--updateref", - "--rewrite", NULL); - strvec_push(&cp_reflog.args, info->revision.buf); - ret = run_command(&cp_reflog); - if (!ret) { + if (!reflog_delete(info->revision.buf, + EXPIRE_REFLOGS_REWRITE | EXPIRE_REFLOGS_UPDATE_REF, + 0)) { if (!quiet) printf_ln(_("Dropped %s (%s)"), info->revision.buf, oid_to_hex(&info->w_commit)); diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c index d8638434dc..5301612d24 100644 --- a/builtin/submodule--helper.c +++ b/builtin/submodule--helper.c @@ -31,11 +31,13 @@ typedef void (*each_submodule_fn)(const struct cache_entry *list_item, void *cb_data); -static char *get_default_remote(void) +static char *repo_get_default_remote(struct repository *repo) { char *dest = NULL, *ret; struct strbuf sb = STRBUF_INIT; - const char *refname = resolve_ref_unsafe("HEAD", 0, NULL, NULL); + struct ref_store *store = get_main_ref_store(repo); + const char *refname = refs_resolve_ref_unsafe(store, "HEAD", 0, NULL, + NULL); if (!refname) die(_("No such ref: %s"), "HEAD"); @@ -48,7 +50,7 @@ static char *get_default_remote(void) die(_("Expecting a full ref name, got %s"), refname); strbuf_addf(&sb, "branch.%s.remote", refname); - if (git_config_get_string(sb.buf, &dest)) + if (repo_config_get_string(repo, sb.buf, &dest)) ret = xstrdup("origin"); else ret = dest; @@ -57,19 +59,17 @@ static char *get_default_remote(void) return ret; } -static int print_default_remote(int argc, const char **argv, const char *prefix) +static char *get_default_remote_submodule(const char *module_path) { - char *remote; - - if (argc != 1) - die(_("submodule--helper print-default-remote takes no arguments")); + struct repository subrepo; - remote = get_default_remote(); - if (remote) - printf("%s\n", remote); + repo_submodule_init(&subrepo, the_repository, module_path, null_oid()); + return repo_get_default_remote(&subrepo); +} - free(remote); - return 0; +static char *get_default_remote(void) +{ + return repo_get_default_remote(the_repository); } static int starts_with_dot_slash(const char *str) @@ -247,11 +247,10 @@ static int resolve_relative_url_test(int argc, const char **argv, const char *pr return 0; } -/* the result should be freed by the caller. */ -static char *get_submodule_displaypath(const char *path, const char *prefix) +static char *do_get_submodule_displaypath(const char *path, + const char *prefix, + const char *super_prefix) { - const char *super_prefix = get_super_prefix(); - if (prefix && super_prefix) { BUG("cannot have prefix '%s' and superprefix '%s'", prefix, super_prefix); @@ -267,6 +266,13 @@ static char *get_submodule_displaypath(const char *path, const char *prefix) } } +/* the result should be freed by the caller. 
*/ +static char *get_submodule_displaypath(const char *path, const char *prefix) +{ + const char *super_prefix = get_super_prefix(); + return do_get_submodule_displaypath(path, prefix, super_prefix); +} + static char *compute_rev_name(const char *sub_path, const char* object_id) { struct strbuf sb = STRBUF_INIT; @@ -588,18 +594,22 @@ static int module_foreach(int argc, const char **argv, const char *prefix) struct init_cb { const char *prefix; + const char *superprefix; unsigned int flags; }; #define INIT_CB_INIT { 0 } static void init_submodule(const char *path, const char *prefix, - unsigned int flags) + const char *superprefix, unsigned int flags) { const struct submodule *sub; struct strbuf sb = STRBUF_INIT; char *upd = NULL, *url = NULL, *displaypath; - displaypath = get_submodule_displaypath(path, prefix); + /* try superprefix from the environment, if it is not passed explicitly */ + if (!superprefix) + superprefix = get_super_prefix(); + displaypath = do_get_submodule_displaypath(path, prefix, superprefix); sub = submodule_from_path(the_repository, null_oid(), path); @@ -673,7 +683,7 @@ static void init_submodule(const char *path, const char *prefix, static void init_submodule_cb(const struct cache_entry *list_item, void *cb_data) { struct init_cb *info = cb_data; - init_submodule(list_item->name, info->prefix, info->flags); + init_submodule(list_item->name, info->prefix, info->superprefix, info->flags); } static int module_init(int argc, const char **argv, const char *prefix) @@ -1343,9 +1353,8 @@ static void sync_submodule(const char *path, const char *prefix, { const struct submodule *sub; char *remote_key = NULL; - char *sub_origin_url, *super_config_url, *displaypath; + char *sub_origin_url, *super_config_url, *displaypath, *default_remote; struct strbuf sb = STRBUF_INIT; - struct child_process cp = CHILD_PROCESS_INIT; char *sub_config_path = NULL; if (!is_submodule_active(the_repository, path)) @@ -1384,21 +1393,15 @@ static void sync_submodule(const char *path, const char *prefix, if (!is_submodule_populated_gently(path, NULL)) goto cleanup; - prepare_submodule_repo_env(&cp.env_array); - cp.git_cmd = 1; - cp.dir = path; - strvec_pushl(&cp.args, "submodule--helper", - "print-default-remote", NULL); - strbuf_reset(&sb); - if (capture_command(&cp, &sb, 0)) + default_remote = get_default_remote_submodule(path); + if (!default_remote) die(_("failed to get the default remote for submodule '%s'"), path); - strbuf_strip_suffix(&sb, "\n"); - remote_key = xstrfmt("remote.%s.url", sb.buf); + remote_key = xstrfmt("remote.%s.url", default_remote); + free(default_remote); - strbuf_reset(&sb); submodule_to_gitdir(&sb, path); strbuf_addstr(&sb, "/config"); @@ -1957,29 +1960,6 @@ static void determine_submodule_update_strategy(struct repository *r, free(key); } -static int module_update_module_mode(int argc, const char **argv, const char *prefix) -{ - const char *path, *update = NULL; - int just_cloned; - struct submodule_update_strategy update_strategy = { .type = SM_UPDATE_CHECKOUT }; - - if (argc < 3 || argc > 4) - die("submodule--helper update-module-clone expects <just-cloned> <path> [<update>]"); - - just_cloned = git_config_int("just_cloned", argv[1]); - path = argv[2]; - - if (argc == 4) - update = argv[3]; - - determine_submodule_update_strategy(the_repository, - just_cloned, path, update, - &update_strategy); - fputs(submodule_strategy_to_string(&update_strategy), stdout); - - return 0; -} - struct update_clone_data { const struct submodule *sub; struct object_id oid; @@ -2020,6 
+2000,7 @@ struct submodule_update_clone { int failed_clones_nr, failed_clones_alloc; int max_jobs; + unsigned int init; }; #define SUBMODULE_UPDATE_CLONE_INIT { \ .list = MODULE_LIST_INIT, \ @@ -2038,10 +2019,11 @@ struct update_data { struct object_id suboid; struct submodule_update_strategy update_strategy; int depth; - unsigned int force: 1; - unsigned int quiet: 1; - unsigned int nofetch: 1; - unsigned int just_cloned: 1; + unsigned int force; + unsigned int quiet; + unsigned int nofetch; + unsigned int just_cloned; + unsigned int remote; }; #define UPDATE_DATA_INIT { .update_strategy = SUBMODULE_UPDATE_STRATEGY_INIT } @@ -2475,6 +2457,16 @@ static int do_run_update_procedure(struct update_data *ud) return run_update_command(ud, subforce); } +/* + * NEEDSWORK: As we convert "git submodule update" to C, + * update_submodule2() will invoke more and more functions, making it + * difficult to preserve the function ordering without forward + * declarations. + * + * When the conversion is complete, this forward declaration will be + * unnecessary and should be removed. + */ +static int update_submodule2(struct update_data *update_data); static void update_submodule(struct update_clone_data *ucd) { fprintf(stdout, "dummy %s %d\t%s\n", @@ -2518,6 +2510,8 @@ static int update_clone(int argc, const char **argv, const char *prefix) int ret; struct option module_update_clone_options[] = { + OPT_BOOL(0, "init", &suc.init, + N_("initialize uninitialized submodules before update")), OPT_STRING(0, "prefix", &prefix, N_("path"), N_("path into the working tree")), @@ -2551,7 +2545,12 @@ static int update_clone(int argc, const char **argv, const char *prefix) }; const char *const git_submodule_helper_usage[] = { - N_("git submodule--helper update-clone [--prefix=<path>] [<path>...]"), + N_("git submodule [--quiet] update" + " [--init [--filter=<filter-spec>]] [--remote]" + " [-N|--no-fetch] [-f|--force]" + " [--checkout|--merge|--rebase]" + " [--[no-]recommend-shallow] [--reference <repository>]" + " [--recursive] [--[no-]single-branch] [--] [<path>...]"), NULL }; suc.prefix = prefix; @@ -2562,6 +2561,19 @@ static int update_clone(int argc, const char **argv, const char *prefix) memset(&filter_options, 0, sizeof(filter_options)); argc = parse_options(argc, argv, prefix, module_update_clone_options, git_submodule_helper_usage, 0); + + if (filter_options.choice && !suc.init) { + /* + * NEEDSWORK: Don't use usage_with_options() because the + * usage string is for "git submodule update", but the + * options are for "git submodule--helper update-clone". + * + * This will no longer be an issue when "update-clone" + * is replaced by "git submodule--helper update". + */ + usage(git_submodule_helper_usage[0]); + } + suc.filter_options = &filter_options; if (update) @@ -2576,6 +2588,29 @@ static int update_clone(int argc, const char **argv, const char *prefix) if (pathspec.nr) suc.warn_if_uninitialized = 1; + if (suc.init) { + struct module_list list = MODULE_LIST_INIT; + struct init_cb info = INIT_CB_INIT; + + if (module_list_compute(argc, argv, suc.prefix, + &pathspec, &list) < 0) + return 1; + + /* + * If there are no path args and submodule.active is set then, + * by default, only initialize 'active' modules. 
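
git_config_get_value_multi() returns the list of every value recorded for a multi-valued key, or NULL when the key is unset, which is why its bare return value can gate the "active submodules only" behaviour described above. A hedged sketch of inspecting submodule.active that way (the iteration and output are illustrative, not from the patch):

	#include "cache.h"
	#include "config.h"
	#include "string-list.h"

	static void show_active_pathspecs(void)
	{
		const struct string_list *values =
			git_config_get_value_multi("submodule.active");
		struct string_list_item *item;

		if (!values)
			return;	/* submodule.active is not configured */
		for_each_string_list_item(item, values)
			printf("active pathspec: %s\n", item->string);
	}
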
+ */ + if (!argc && git_config_get_value_multi("submodule.active")) + module_list_active(&list); + + info.prefix = suc.prefix; + info.superprefix = suc.recursive_prefix; + if (suc.quiet) + info.flags |= OPT_QUIET; + + for_each_listed_submodule(&list, init_submodule_cb, &info); + } + ret = update_submodules(&suc); list_objects_filter_release(&filter_options); return ret; @@ -2583,16 +2618,17 @@ static int update_clone(int argc, const char **argv, const char *prefix) static int run_update_procedure(int argc, const char **argv, const char *prefix) { - int force = 0, quiet = 0, nofetch = 0, just_cloned = 0; char *prefixed_path, *update = NULL; struct update_data update_data = UPDATE_DATA_INIT; struct option options[] = { - OPT__QUIET(&quiet, N_("suppress output for update by rebase or merge")), - OPT__FORCE(&force, N_("force checkout updates"), 0), - OPT_BOOL('N', "no-fetch", &nofetch, + OPT__QUIET(&update_data.quiet, + N_("suppress output for update by rebase or merge")), + OPT__FORCE(&update_data.force, N_("force checkout updates"), + 0), + OPT_BOOL('N', "no-fetch", &update_data.nofetch, N_("don't fetch new objects from the remote site")), - OPT_BOOL(0, "just-cloned", &just_cloned, + OPT_BOOL(0, "just-cloned", &update_data.just_cloned, N_("overrides update mode in case the repository is a fresh clone")), OPT_INTEGER(0, "depth", &update_data.depth, N_("depth for shallow fetch")), OPT_STRING(0, "prefix", &prefix, @@ -2607,9 +2643,8 @@ static int run_update_procedure(int argc, const char **argv, const char *prefix) OPT_CALLBACK_F(0, "oid", &update_data.oid, N_("sha1"), N_("SHA1 expected by superproject"), PARSE_OPT_NONEG, parse_opt_object_id), - OPT_CALLBACK_F(0, "suboid", &update_data.suboid, N_("subsha1"), - N_("SHA1 of submodule's HEAD"), PARSE_OPT_NONEG, - parse_opt_object_id), + OPT_BOOL(0, "remote", &update_data.remote, + N_("use SHA-1 of submodule's remote tracking branch")), OPT_END() }; @@ -2623,10 +2658,6 @@ static int run_update_procedure(int argc, const char **argv, const char *prefix) if (argc != 1) usage_with_options(usage, options); - update_data.force = !!force; - update_data.quiet = !!quiet; - update_data.nofetch = !!nofetch; - update_data.just_cloned = !!just_cloned; update_data.sm_path = argv[0]; if (update_data.recursive_prefix) @@ -2641,11 +2672,7 @@ static int run_update_procedure(int argc, const char **argv, const char *prefix) &update_data.update_strategy); free(prefixed_path); - - if (!oideq(&update_data.oid, &update_data.suboid) || update_data.force) - return do_run_update_procedure(&update_data); - - return 3; + return update_submodule2(&update_data); } static int resolve_relative_path(int argc, const char **argv, const char *prefix) @@ -2697,23 +2724,6 @@ static const char *remote_submodule_branch(const char *path) return branch; } -static int resolve_remote_submodule_branch(int argc, const char **argv, - const char *prefix) -{ - const char *ret; - struct strbuf sb = STRBUF_INIT; - if (argc != 2) - die("submodule--helper remote-branch takes exactly one arguments, got %d", argc); - - ret = remote_submodule_branch(argv[1]); - if (!ret) - die("submodule %s doesn't exist", argv[1]); - - printf("%s", ret); - strbuf_release(&sb); - return 0; -} - static int push_check(int argc, const char **argv, const char *prefix) { struct remote *remote; @@ -2791,17 +2801,11 @@ static int push_check(int argc, const char **argv, const char *prefix) return 0; } -static int ensure_core_worktree(int argc, const char **argv, const char *prefix) +static void ensure_core_worktree(const char 
*path) { - const char *path; const char *cw; struct repository subrepo; - if (argc != 2) - BUG("submodule--helper ensure-core-worktree <path>"); - - path = argv[1]; - if (repo_submodule_init(&subrepo, the_repository, path, null_oid())) die(_("could not get a repository handle for submodule '%s'"), path); @@ -2821,8 +2825,6 @@ static int ensure_core_worktree(int argc, const char **argv, const char *prefix) free(abs_path); strbuf_release(&sb); } - - return 0; } static int absorb_git_dirs(int argc, const char **argv, const char *prefix) @@ -3045,6 +3047,42 @@ static int module_create_branch(int argc, const char **argv, const char *prefix) force, reflog, quiet, track, dry_run); return 0; } + +/* NEEDSWORK: this is a temporary name until we delete update_submodule() */ +static int update_submodule2(struct update_data *update_data) +{ + ensure_core_worktree(update_data->sm_path); + if (update_data->just_cloned) + oidcpy(&update_data->suboid, null_oid()); + else if (resolve_gitlink_ref(update_data->sm_path, "HEAD", &update_data->suboid)) + die(_("Unable to find current revision in submodule path '%s'"), + update_data->displaypath); + + if (update_data->remote) { + char *remote_name = get_default_remote_submodule(update_data->sm_path); + const char *branch = remote_submodule_branch(update_data->sm_path); + char *remote_ref = xstrfmt("refs/remotes/%s/%s", remote_name, branch); + + if (!update_data->nofetch) { + if (fetch_in_submodule(update_data->sm_path, update_data->depth, + 0, NULL)) + die(_("Unable to fetch in submodule path '%s'"), + update_data->sm_path); + } + + if (resolve_gitlink_ref(update_data->sm_path, remote_ref, &update_data->oid)) + die(_("Unable to find %s revision in submodule path '%s'"), + remote_ref, update_data->sm_path); + + free(remote_ref); + } + + if (!oideq(&update_data->oid, &update_data->suboid) || update_data->force) + return do_run_update_procedure(update_data); + + return 3; +} + struct add_data { const char *prefix; const char *branch; @@ -3309,6 +3347,7 @@ static int module_add(int argc, const char **argv, const char *prefix) { int force = 0, quiet = 0, progress = 0, dissociate = 0; struct add_data add_data = ADD_DATA_INIT; + char *to_free = NULL; struct option options[] = { OPT_STRING('b', "branch", &add_data.branch, N_("branch"), @@ -3360,7 +3399,8 @@ static int module_add(int argc, const char **argv, const char *prefix) "of the working tree")); /* dereference source url relative to parent's url */ - add_data.realrepo = resolve_relative_url(add_data.repo, NULL, 1); + to_free = resolve_relative_url(add_data.repo, NULL, 1); + add_data.realrepo = to_free; } else if (is_dir_sep(add_data.repo[0]) || strchr(add_data.repo, ':')) { add_data.realrepo = add_data.repo; } else { @@ -3413,6 +3453,7 @@ static int module_add(int argc, const char **argv, const char *prefix) } configure_added_submodule(&add_data); free(add_data.sm_path); + free(to_free); return 0; } @@ -3430,20 +3471,16 @@ static struct cmd_struct commands[] = { {"name", module_name, 0}, {"clone", module_clone, 0}, {"add", module_add, SUPPORT_SUPER_PREFIX}, - {"update-module-mode", module_update_module_mode, 0}, {"update-clone", update_clone, 0}, {"run-update-procedure", run_update_procedure, 0}, - {"ensure-core-worktree", ensure_core_worktree, 0}, {"relative-path", resolve_relative_path, 0}, {"resolve-relative-url-test", resolve_relative_url_test, 0}, {"foreach", module_foreach, SUPPORT_SUPER_PREFIX}, {"init", module_init, SUPPORT_SUPER_PREFIX}, {"status", module_status, SUPPORT_SUPER_PREFIX}, - 
{"print-default-remote", print_default_remote, 0}, {"sync", module_sync, SUPPORT_SUPER_PREFIX}, {"deinit", module_deinit, 0}, {"summary", module_summary, SUPPORT_SUPER_PREFIX}, - {"remote-branch", resolve_remote_submodule_branch, 0}, {"push-check", push_check, 0}, {"absorb-git-dirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX}, {"is-active", is_active, 0}, diff --git a/builtin/tag.c b/builtin/tag.c index 2479da0704..e5a8f85693 100644 --- a/builtin/tag.c +++ b/builtin/tag.c @@ -239,7 +239,7 @@ static int build_tag_object(struct strbuf *buf, int sign, struct object_id *resu { if (sign && do_sign(buf) < 0) return error(_("unable to sign the tag")); - if (write_object_file(buf->buf, buf->len, tag_type, result) < 0) + if (write_object_file(buf->buf, buf->len, OBJ_TAG, result) < 0) return error(_("unable to write tag file")); return 0; } diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c index 4a9466295b..dbeb0680a5 100644 --- a/builtin/unpack-objects.c +++ b/builtin/unpack-objects.c @@ -177,7 +177,7 @@ static void write_cached_object(struct object *obj, struct obj_buffer *obj_buf) struct object_id oid; if (write_object_file(obj_buf->buffer, obj_buf->size, - type_name(obj->type), &oid) < 0) + obj->type, &oid) < 0) die("failed to write object %s", oid_to_hex(&obj->oid)); obj->flags |= FLAG_WRITTEN; } @@ -243,7 +243,7 @@ static void write_object(unsigned nr, enum object_type type, void *buf, unsigned long size) { if (!strict) { - if (write_object_file(buf, size, type_name(type), + if (write_object_file(buf, size, type, &obj_list[nr].oid) < 0) die("failed to write object"); added_object(nr, type, buf, size); @@ -251,7 +251,7 @@ static void write_object(unsigned nr, enum object_type type, obj_list[nr].obj = NULL; } else if (type == OBJ_BLOB) { struct blob *blob; - if (write_object_file(buf, size, type_name(type), + if (write_object_file(buf, size, type, &obj_list[nr].oid) < 0) die("failed to write object"); added_object(nr, type, buf, size); @@ -266,7 +266,7 @@ static void write_object(unsigned nr, enum object_type type, } else { struct object *obj; int eaten; - hash_object_file(the_hash_algo, buf, size, type_name(type), + hash_object_file(the_hash_algo, buf, size, type, &obj_list[nr].oid); added_object(nr, type, buf, size); obj = parse_object_buffer(the_repository, &obj_list[nr].oid, diff --git a/bulk-checkin.c b/bulk-checkin.c index 8785b2ac80..6d6c37171c 100644 --- a/bulk-checkin.c +++ b/bulk-checkin.c @@ -53,9 +53,10 @@ static void finish_bulk_checkin(struct bulk_checkin_state *state) unlink(state->pack_tmp_name); goto clear_exit; } else if (state->nr_written == 1) { - finalize_hashfile(state->f, hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); + finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK, + CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); } else { - int fd = finalize_hashfile(state->f, hash, 0); + int fd = finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK, 0); fixup_pack_header_footer(fd, hash, state->pack_tmp_name, state->nr_written, hash, state->offset); @@ -220,8 +221,8 @@ static int deflate_to_pack(struct bulk_checkin_state *state, if (seekback == (off_t) -1) return error("cannot find the current offset"); - header_len = xsnprintf((char *)obuf, sizeof(obuf), "%s %" PRIuMAX, - type_name(type), (uintmax_t)size) + 1; + header_len = format_object_header((char *)obuf, sizeof(obuf), + type, size); the_hash_algo->init_fn(&ctx); the_hash_algo->update_fn(&ctx, obuf, header_len); @@ -11,7 +11,7 @@ #include "run-command.h" #include "refs.h" #include "strvec.h" - +#include 
"list-objects-filter-options.h" static const char v2_bundle_signature[] = "# v2 git bundle\n"; static const char v3_bundle_signature[] = "# v3 git bundle\n"; @@ -33,6 +33,7 @@ void bundle_header_release(struct bundle_header *header) { string_list_clear(&header->prerequisites, 1); string_list_clear(&header->references, 1); + list_objects_filter_release(&header->filter); } static int parse_capability(struct bundle_header *header, const char *capability) @@ -45,6 +46,10 @@ static int parse_capability(struct bundle_header *header, const char *capability header->hash_algo = &hash_algos[algo]; return 0; } + if (skip_prefix(capability, "filter=", &arg)) { + parse_list_objects_filter(&header->filter, arg); + return 0; + } return error(_("unknown capability '%s'"), capability); } @@ -220,6 +225,8 @@ int verify_bundle(struct repository *r, req_nr = revs.pending.nr; setup_revisions(2, argv, &revs, NULL); + list_objects_filter_copy(&revs.filter, &header->filter); + if (prepare_revision_walk(&revs)) die(_("revision walk setup failed")); @@ -255,18 +262,24 @@ int verify_bundle(struct repository *r, r = &header->references; printf_ln(Q_("The bundle contains this ref:", - "The bundle contains these %d refs:", + "The bundle contains these %"PRIuMAX" refs:", r->nr), - r->nr); + (uintmax_t)r->nr); list_refs(r, 0, NULL); + + if (header->filter.choice) { + printf_ln("The bundle uses this filter: %s", + list_objects_filter_spec(&header->filter)); + } + r = &header->prerequisites; if (!r->nr) { printf_ln(_("The bundle records a complete history.")); } else { printf_ln(Q_("The bundle requires this ref:", - "The bundle requires these %d refs:", + "The bundle requires these %"PRIuMAX" refs:", r->nr), - r->nr); + (uintmax_t)r->nr); list_refs(r, 0, NULL); } } @@ -319,6 +332,9 @@ static int write_pack_data(int bundle_fd, struct rev_info *revs, struct strvec * "--stdout", "--thin", "--delta-base-offset", NULL); strvec_pushv(&pack_objects.args, pack_options->v); + if (revs->filter.choice) + strvec_pushf(&pack_objects.args, "--filter=%s", + list_objects_filter_spec(&revs->filter)); pack_objects.in = -1; pack_objects.out = bundle_fd; pack_objects.git_cmd = 1; @@ -486,10 +502,37 @@ int create_bundle(struct repository *r, const char *path, int bundle_to_stdout; int ref_count = 0; struct rev_info revs, revs_copy; - int min_version = the_hash_algo == &hash_algos[GIT_HASH_SHA1] ? 2 : 3; + int min_version = 2; struct bundle_prerequisites_info bpi; int i; + /* init revs to list objects for pack-objects later */ + save_commit_buffer = 0; + repo_init_revisions(r, &revs, NULL); + + /* + * Pre-initialize the '--objects' flag so we can parse a + * --filter option successfully. + */ + revs.tree_objects = revs.blob_objects = 1; + + argc = setup_revisions(argc, argv, &revs, NULL); + + /* + * Reasons to require version 3: + * + * 1. @object-format is required because our hash algorithm is not + * SHA1. + * 2. @filter is required because we parsed an object filter. 
+ */ + if (the_hash_algo != &hash_algos[GIT_HASH_SHA1] || revs.filter.choice) + min_version = 3; + + if (argc > 1) { + error(_("unrecognized argument: %s"), argv[1]); + goto err; + } + bundle_to_stdout = !strcmp(path, "-"); if (bundle_to_stdout) bundle_fd = 1; @@ -512,17 +555,14 @@ int create_bundle(struct repository *r, const char *path, write_or_die(bundle_fd, capability, strlen(capability)); write_or_die(bundle_fd, the_hash_algo->name, strlen(the_hash_algo->name)); write_or_die(bundle_fd, "\n", 1); - } - /* init revs to list objects for pack-objects later */ - save_commit_buffer = 0; - repo_init_revisions(r, &revs, NULL); - - argc = setup_revisions(argc, argv, &revs, NULL); - - if (argc > 1) { - error(_("unrecognized argument: %s"), argv[1]); - goto err; + if (revs.filter.choice) { + const char *value = expand_list_objects_filter_spec(&revs.filter); + capability = "@filter="; + write_or_die(bundle_fd, capability, strlen(capability)); + write_or_die(bundle_fd, value, strlen(value)); + write_or_die(bundle_fd, "\n", 1); + } } /* save revs.pending in revs_copy for later use */ @@ -544,6 +584,12 @@ int create_bundle(struct repository *r, const char *path, die("revision walk setup failed"); bpi.fd = bundle_fd; bpi.pending = &revs_copy.pending; + + /* + * Remove any object walking here. We only care about commits and + * tags here. The revs_copy has the right instances of these values. + */ + revs.blob_objects = revs.tree_objects = 0; traverse_commit_list(&revs, write_bundle_prerequisites, NULL, &bpi); object_array_remove_duplicates(&revs_copy.pending); @@ -574,6 +620,10 @@ int unbundle(struct repository *r, struct bundle_header *header, struct child_process ip = CHILD_PROCESS_INIT; strvec_pushl(&ip.args, "index-pack", "--fix-thin", "--stdin", NULL); + /* If there is a filter, then we need to create the promisor pack. */ + if (header->filter.choice) + strvec_push(&ip.args, "--promisor=from-bundle"); + if (extra_index_pack_args) { strvec_pushv(&ip.args, extra_index_pack_args->v); strvec_clear(extra_index_pack_args); @@ -4,12 +4,14 @@ #include "strvec.h" #include "cache.h" #include "string-list.h" +#include "list-objects-filter-options.h" struct bundle_header { unsigned version; struct string_list prerequisites; struct string_list references; const struct git_hash_algo *hash_algo; + struct list_objects_filter_options filter; }; #define BUNDLE_HEADER_INIT \ diff --git a/cache-tree.c b/cache-tree.c index 65ca993361..6752f69d51 100644 --- a/cache-tree.c +++ b/cache-tree.c @@ -432,15 +432,15 @@ static int update_one(struct cache_tree *it, if (repair) { struct object_id oid; hash_object_file(the_hash_algo, buffer.buf, buffer.len, - tree_type, &oid); + OBJ_TREE, &oid); if (has_object_file_with_flags(&oid, OBJECT_INFO_SKIP_FETCH_OBJECT)) oidcpy(&it->oid, &oid); else to_invalidate = 1; } else if (dryrun) { hash_object_file(the_hash_algo, buffer.buf, buffer.len, - tree_type, &it->oid); - } else if (write_object_file_flags(buffer.buf, buffer.len, tree_type, + OBJ_TREE, &it->oid); + } else if (write_object_file_flags(buffer.buf, buffer.len, OBJ_TREE, &it->oid, flags & WRITE_TREE_SILENT ? 
HASH_SILENT : 0)) { strbuf_release(&buffer); @@ -948,7 +948,7 @@ static int verify_one(struct repository *r, strbuf_addf(&tree_buf, "%o %.*s%c", mode, entlen, name, '\0'); strbuf_add(&tree_buf, oid->hash, r->hash_algo->rawsz); } - hash_object_file(r->hash_algo, tree_buf.buf, tree_buf.len, tree_type, + hash_object_file(r->hash_algo, tree_buf.buf, tree_buf.len, OBJ_TREE, &new_oid); if (!oideq(&new_oid, &it->oid)) BUG("cache-tree for path %.*s does not match. " @@ -993,8 +993,57 @@ void reset_shared_repository(void); extern int read_replace_refs; extern char *git_replace_ref_base; +/* + * These values are used to help identify parts of a repository to fsync. + * FSYNC_COMPONENT_NONE identifies data that will not be a persistent part of the + * repository and so shouldn't be fsynced. + */ +enum fsync_component { + FSYNC_COMPONENT_NONE, + FSYNC_COMPONENT_LOOSE_OBJECT = 1 << 0, + FSYNC_COMPONENT_PACK = 1 << 1, + FSYNC_COMPONENT_PACK_METADATA = 1 << 2, + FSYNC_COMPONENT_COMMIT_GRAPH = 1 << 3, + FSYNC_COMPONENT_INDEX = 1 << 4, + FSYNC_COMPONENT_REFERENCE = 1 << 5, +}; + +#define FSYNC_COMPONENTS_OBJECTS (FSYNC_COMPONENT_LOOSE_OBJECT | \ + FSYNC_COMPONENT_PACK) + +#define FSYNC_COMPONENTS_DERIVED_METADATA (FSYNC_COMPONENT_PACK_METADATA | \ + FSYNC_COMPONENT_COMMIT_GRAPH) + +#define FSYNC_COMPONENTS_DEFAULT (FSYNC_COMPONENTS_OBJECTS | \ + FSYNC_COMPONENTS_DERIVED_METADATA | \ + ~FSYNC_COMPONENT_LOOSE_OBJECT) + +#define FSYNC_COMPONENTS_COMMITTED (FSYNC_COMPONENTS_OBJECTS | \ + FSYNC_COMPONENT_REFERENCE) + +#define FSYNC_COMPONENTS_ADDED (FSYNC_COMPONENTS_COMMITTED | \ + FSYNC_COMPONENT_INDEX) + +#define FSYNC_COMPONENTS_ALL (FSYNC_COMPONENT_LOOSE_OBJECT | \ + FSYNC_COMPONENT_PACK | \ + FSYNC_COMPONENT_PACK_METADATA | \ + FSYNC_COMPONENT_COMMIT_GRAPH | \ + FSYNC_COMPONENT_INDEX | \ + FSYNC_COMPONENT_REFERENCE) + +/* + * A bitmask indicating which components of the repo should be fsynced. + */ +extern enum fsync_component fsync_components; extern int fsync_object_files; extern int use_fsync; + +enum fsync_method { + FSYNC_METHOD_FSYNC, + FSYNC_METHOD_WRITEOUT_ONLY +}; + +extern enum fsync_method fsync_method; extern int core_preload_index; extern int precomposed_unicode; extern int protect_hfs; @@ -1003,6 +1052,7 @@ extern const char *core_fsmonitor; extern int core_apply_sparse_checkout; extern int core_sparse_checkout_cone; +extern int sparse_expect_files_outside_of_patterns; /* * Returns the boolean value of $GIT_OPTIONAL_LOCKS (or the default value). @@ -1319,9 +1369,23 @@ enum unpack_loose_header_result unpack_loose_header(git_zstream *stream, struct object_info; int parse_loose_header(const char *hdr, struct object_info *oi); +/** + * With in-core object data in "buf", rehash it to make sure the + * object name actually matches "oid" to detect object corruption. + * + * A negative value indicates an error, usually that the OID is not + * what we expected, but it might also indicate another error. + */ int check_object_signature(struct repository *r, const struct object_id *oid, - void *buf, unsigned long size, const char *type, - struct object_id *real_oidp); + void *map, unsigned long size, + enum object_type type); + +/** + * A streaming version of check_object_signature(). + * Try reading the object named with "oid" using + * the streaming interface and rehash it to do the same. 
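
With this change a caller re-verifies in-core object data by passing the enum type straight through, and can fall back to stream_object_signature() when it would rather not hold the whole object in memory. A condensed sketch of the in-core case, with error handling abbreviated:

	#include "cache.h"
	#include "object-store.h"

	static void verify_object(struct repository *r, const struct object_id *oid)
	{
		enum object_type type;
		unsigned long size;
		void *buf = repo_read_object_file(r, oid, &type, &size);

		if (!buf)
			die(_("unable to read %s"), oid_to_hex(oid));
		if (check_object_signature(r, oid, buf, size, type) < 0)
			die(_("hash mismatch for %s"), oid_to_hex(oid));
		free(buf);
	}
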
+ */ +int stream_object_signature(struct repository *r, const struct object_id *oid); int finalize_object_file(const char *tmpfile, const char *filename); @@ -1548,7 +1612,7 @@ int cache_name_stage_compare(const char *name1, int len1, int stage1, const char void *read_object_with_reference(struct repository *r, const struct object_id *oid, - const char *required_type, + enum object_type required_type, unsigned long *size, struct object_id *oid_ret); @@ -1700,6 +1764,8 @@ int copy_file_with_time(const char *dst, const char *src, int mode); void write_or_die(int fd, const void *buf, size_t count); void fsync_or_die(int fd, const char *); +int fsync_component(enum fsync_component component, int fd); +void fsync_component_or_die(enum fsync_component component, int fd, const char *msg); ssize_t read_in_full(int fd, void *buf, size_t count); ssize_t write_in_full(int fd, const void *buf, size_t count); diff --git a/commit-graph.c b/commit-graph.c index 265c010122..441b36016b 100644 --- a/commit-graph.c +++ b/commit-graph.c @@ -39,8 +39,8 @@ void git_test_write_commit_graph_or_die(void) #define GRAPH_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */ #define GRAPH_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */ #define GRAPH_CHUNKID_DATA 0x43444154 /* "CDAT" */ -#define GRAPH_CHUNKID_GENERATION_DATA 0x47444154 /* "GDAT" */ -#define GRAPH_CHUNKID_GENERATION_DATA_OVERFLOW 0x47444f56 /* "GDOV" */ +#define GRAPH_CHUNKID_GENERATION_DATA 0x47444132 /* "GDA2" */ +#define GRAPH_CHUNKID_GENERATION_DATA_OVERFLOW 0x47444f32 /* "GDO2" */ #define GRAPH_CHUNKID_EXTRAEDGES 0x45444745 /* "EDGE" */ #define GRAPH_CHUNKID_BLOOMINDEXES 0x42494458 /* "BIDX" */ #define GRAPH_CHUNKID_BLOOMDATA 0x42444154 /* "BDAT" */ @@ -407,6 +407,9 @@ struct commit_graph *parse_commit_graph(struct repository *r, &graph->chunk_generation_data); pair_chunk(cf, GRAPH_CHUNKID_GENERATION_DATA_OVERFLOW, &graph->chunk_generation_data_overflow); + + if (graph->chunk_generation_data) + graph->read_generation_data = 1; } if (r->settings.commit_graph_read_changed_paths) { @@ -803,7 +806,7 @@ static void fill_commit_graph_info(struct commit *item, struct commit_graph *g, die(_("commit-graph requires overflow generation data but has none")); offset_pos = offset ^ CORRECTED_COMMIT_DATE_OFFSET_OVERFLOW; - graph_data->generation = get_be64(g->chunk_generation_data_overflow + 8 * offset_pos); + graph_data->generation = item->date + get_be64(g->chunk_generation_data_overflow + 8 * offset_pos); } else graph_data->generation = item->date + offset; } else @@ -1556,12 +1559,16 @@ static void compute_generation_numbers(struct write_commit_graph_context *ctx) if (current->date && current->date > max_corrected_commit_date) max_corrected_commit_date = current->date - 1; commit_graph_data_at(current)->generation = max_corrected_commit_date + 1; - - if (commit_graph_data_at(current)->generation - current->date > GENERATION_NUMBER_V2_OFFSET_MAX) - ctx->num_generation_data_overflows++; } } } + + for (i = 0; i < ctx->commits.nr; i++) { + struct commit *c = ctx->commits.list[i]; + timestamp_t offset = commit_graph_data_at(c)->generation - c->date; + if (offset > GENERATION_NUMBER_V2_OFFSET_MAX) + ctx->num_generation_data_overflows++; + } stop_progress(&ctx->progress); } @@ -1679,21 +1686,22 @@ int write_commit_graph_reachable(struct object_directory *odb, } static int fill_oids_from_packs(struct write_commit_graph_context *ctx, - struct string_list *pack_indexes) + const struct string_list *pack_indexes) { uint32_t i; struct strbuf progress_title = STRBUF_INIT; struct strbuf 
packname = STRBUF_INIT; int dirlen; + int ret = 0; strbuf_addf(&packname, "%s/pack/", ctx->odb->path); dirlen = packname.len; if (ctx->report_progress) { strbuf_addf(&progress_title, - Q_("Finding commits for commit graph in %d pack", - "Finding commits for commit graph in %d packs", + Q_("Finding commits for commit graph in %"PRIuMAX" pack", + "Finding commits for commit graph in %"PRIuMAX" packs", pack_indexes->nr), - pack_indexes->nr); + (uintmax_t)pack_indexes->nr); ctx->progress = start_delayed_progress(progress_title.buf, 0); ctx->progress_done = 0; } @@ -1703,12 +1711,12 @@ static int fill_oids_from_packs(struct write_commit_graph_context *ctx, strbuf_addstr(&packname, pack_indexes->items[i].string); p = add_packed_git(packname.buf, packname.len, 1); if (!p) { - error(_("error adding pack %s"), packname.buf); - return -1; + ret = error(_("error adding pack %s"), packname.buf); + goto cleanup; } if (open_pack_index(p)) { - error(_("error opening index for %s"), packname.buf); - return -1; + ret = error(_("error opening index for %s"), packname.buf); + goto cleanup; } for_each_object_in_pack(p, add_packed_commits, ctx, FOR_EACH_OBJECT_PACK_ORDER); @@ -1716,11 +1724,12 @@ static int fill_oids_from_packs(struct write_commit_graph_context *ctx, free(p); } +cleanup: stop_progress(&ctx->progress); strbuf_release(&progress_title); strbuf_release(&packname); - return 0; + return ret; } static int fill_oids_from_commits(struct write_commit_graph_context *ctx, @@ -1852,6 +1861,7 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx) hold_lock_file_for_update_mode(&lk, lock_name, LOCK_DIE_ON_ERROR, 0444); + free(lock_name); fd = git_mkstemp_mode(ctx->graph_name, 0444); if (fd < 0) { @@ -1942,7 +1952,8 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx) } close_commit_graph(ctx->r->objects); - finalize_hashfile(f, file_hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC); + finalize_hashfile(f, file_hash, FSYNC_COMPONENT_COMMIT_GRAPH, + CSUM_HASH_IN_STREAM | CSUM_FSYNC); free_chunkfile(cf); if (ctx->split) { @@ -1976,6 +1987,7 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx) } else { char *graph_name = get_commit_graph_filename(ctx->odb); unlink(graph_name); + free(graph_name); } ctx->commit_graph_hash_after[ctx->num_commit_graphs_after - 1] = xstrdup(hash_to_hex(file_hash)); @@ -2259,7 +2271,7 @@ out: } int write_commit_graph(struct object_directory *odb, - struct string_list *pack_indexes, + const struct string_list *const pack_indexes, struct oidset *commits, enum commit_graph_write_flags flags, const struct commit_graph_opts *opts) diff --git a/commit-graph.h b/commit-graph.h index 04a94e1830..2e3ac35237 100644 --- a/commit-graph.h +++ b/commit-graph.h @@ -142,7 +142,7 @@ int write_commit_graph_reachable(struct object_directory *odb, enum commit_graph_write_flags flags, const struct commit_graph_opts *opts); int write_commit_graph(struct object_directory *odb, - struct string_list *pack_indexes, + const struct string_list *pack_indexes, struct oidset *commits, enum commit_graph_write_flags flags, const struct commit_graph_opts *opts); @@ -249,6 +249,16 @@ int for_each_commit_graft(each_commit_graft_fn fn, void *cb_data) return ret; } +void reset_commit_grafts(struct repository *r) +{ + int i; + + for (i = 0; i < r->parsed_objects->grafts_nr; i++) + free(r->parsed_objects->grafts[i]); + r->parsed_objects->grafts_nr = 0; + r->parsed_objects->commit_graft_prepared = 0; +} + struct commit_buffer { void *buffer; unsigned long size; 
@@ -1568,7 +1578,7 @@ int commit_tree_extended(const char *msg, size_t msg_len, goto out; } - result = write_object_file(buffer.buf, buffer.len, commit_type, ret); + result = write_object_file(buffer.buf, buffer.len, OBJ_COMMIT, ret); out: strbuf_release(&buffer); return result; @@ -249,6 +249,7 @@ int commit_graft_pos(struct repository *r, const struct object_id *oid); int register_commit_graft(struct repository *r, struct commit_graft *, int); void prepare_commit_graft(struct repository *r); struct commit_graft *lookup_commit_graft(struct repository *r, const struct object_id *oid); +void reset_commit_grafts(struct repository *r); struct commit *get_fork_point(const char *refname, struct commit *commit); diff --git a/compat/mingw.c b/compat/mingw.c index 03af369b2b..58f347d6ae 100644 --- a/compat/mingw.c +++ b/compat/mingw.c @@ -961,9 +961,11 @@ static inline void time_t_to_filetime(time_t t, FILETIME *ft) int mingw_utime (const char *file_name, const struct utimbuf *times) { FILETIME mft, aft; - int fh, rc; + int rc; DWORD attrs; wchar_t wfilename[MAX_PATH]; + HANDLE osfilehandle; + if (xutftowcs_path(wfilename, file_name) < 0) return -1; @@ -975,7 +977,17 @@ int mingw_utime (const char *file_name, const struct utimbuf *times) SetFileAttributesW(wfilename, attrs & ~FILE_ATTRIBUTE_READONLY); } - if ((fh = _wopen(wfilename, O_RDWR | O_BINARY)) < 0) { + osfilehandle = CreateFileW(wfilename, + FILE_WRITE_ATTRIBUTES, + 0 /*FileShare.None*/, + NULL, + OPEN_EXISTING, + (attrs != INVALID_FILE_ATTRIBUTES && + (attrs & FILE_ATTRIBUTE_DIRECTORY)) ? + FILE_FLAG_BACKUP_SEMANTICS : 0, + NULL); + if (osfilehandle == INVALID_HANDLE_VALUE) { + errno = err_win_to_posix(GetLastError()); rc = -1; goto revert_attrs; } @@ -987,12 +999,15 @@ int mingw_utime (const char *file_name, const struct utimbuf *times) GetSystemTimeAsFileTime(&mft); aft = mft; } - if (!SetFileTime((HANDLE)_get_osfhandle(fh), NULL, &aft, &mft)) { + + if (!SetFileTime(osfilehandle, NULL, &aft, &mft)) { errno = EINVAL; rc = -1; } else rc = 0; - close(fh); + + if (osfilehandle != INVALID_HANDLE_VALUE) + CloseHandle(osfilehandle); revert_attrs: if (attrs != INVALID_FILE_ATTRIBUTES && diff --git a/compat/mingw.h b/compat/mingw.h index c9a52ad64a..6074a3d3ce 100644 --- a/compat/mingw.h +++ b/compat/mingw.h @@ -329,6 +329,9 @@ int mingw_getpagesize(void); #define getpagesize mingw_getpagesize #endif +int win32_fsync_no_flush(int fd); +#define fsync_no_flush win32_fsync_no_flush + struct rlimit { unsigned int rlim_cur; }; diff --git a/compat/terminal.c b/compat/terminal.c index 5b903e7c7e..3620184e79 100644 --- a/compat/terminal.c +++ b/compat/terminal.c @@ -11,7 +11,7 @@ static void restore_term_on_signal(int sig) { restore_term(); - sigchain_pop(sig); + /* restore_term calls sigchain_pop_common */ raise(sig); } @@ -31,14 +31,20 @@ void restore_term(void) tcsetattr(term_fd, TCSAFLUSH, &old_term); close(term_fd); term_fd = -1; + sigchain_pop_common(); } int save_term(int full_duplex) { if (term_fd < 0) term_fd = open("/dev/tty", O_RDWR); + if (term_fd < 0) + return -1; + if (tcgetattr(term_fd, &old_term) < 0) + return -1; + sigchain_push_common(restore_term_on_signal); - return (term_fd < 0) ? 
-1 : tcgetattr(term_fd, &old_term); + return 0; } static int disable_bits(tcflag_t bits) @@ -49,12 +55,16 @@ static int disable_bits(tcflag_t bits) goto error; t = old_term; - sigchain_push_common(restore_term_on_signal); t.c_lflag &= ~bits; + if (bits & ICANON) { + t.c_cc[VMIN] = 1; + t.c_cc[VTIME] = 0; + } if (!tcsetattr(term_fd, TCSAFLUSH, &t)) return 0; + sigchain_pop_common(); error: close(term_fd); term_fd = -1; @@ -100,6 +110,8 @@ void restore_term(void) return; } + sigchain_pop_common(); + if (hconin == INVALID_HANDLE_VALUE) return; @@ -134,6 +146,7 @@ int save_term(int full_duplex) GetConsoleMode(hconin, &cmode_in); use_stty = 0; + sigchain_push_common(restore_term_on_signal); return 0; error: CloseHandle(hconin); @@ -150,7 +163,11 @@ static int disable_bits(DWORD bits) if (bits & ENABLE_LINE_INPUT) { string_list_append(&stty_restore, "icanon"); - strvec_push(&cp.args, "-icanon"); + /* + * POSIX allows VMIN and VTIME to overlap with VEOF and + * VEOL - let's hope that is not the case on windows. + */ + strvec_pushl(&cp.args, "-icanon", "min", "1", "time", "0", NULL); } if (bits & ENABLE_ECHO_INPUT) { @@ -177,10 +194,10 @@ static int disable_bits(DWORD bits) if (save_term(0) < 0) return -1; - sigchain_push_common(restore_term_on_signal); if (!SetConsoleMode(hconin, cmode_in & ~bits)) { CloseHandle(hconin); hconin = INVALID_HANDLE_VALUE; + sigchain_pop_common(); return -1; } @@ -385,7 +402,7 @@ int read_key_without_echo(struct strbuf *buf) ch = getchar(); if (ch == EOF) - return 0; + break; strbuf_addch(buf, ch); } } diff --git a/compat/terminal.h b/compat/terminal.h index e1770c575b..0fb9fa147c 100644 --- a/compat/terminal.h +++ b/compat/terminal.h @@ -1,7 +1,15 @@ #ifndef COMPAT_TERMINAL_H #define COMPAT_TERMINAL_H +/* + * Save the terminal attributes so they can be restored later by a + * call to restore_term(). Note that every successful call to + * save_term() must be matched by a call to restore_term() even if the + * attributes have not been changed. Returns 0 on success, -1 on + * failure. + */ int save_term(int full_duplex); +/* Restore the terminal attributes that were saved with save_term() */ void restore_term(void); char *git_terminal_prompt(const char *prompt, int echo); diff --git a/compat/win32/flush.c b/compat/win32/flush.c new file mode 100644 index 0000000000..291f90ea94 --- /dev/null +++ b/compat/win32/flush.c @@ -0,0 +1,28 @@ +#include "git-compat-util.h" +#include <winternl.h> +#include "lazyload.h" + +int win32_fsync_no_flush(int fd) +{ + IO_STATUS_BLOCK io_status; + +#define FLUSH_FLAGS_FILE_DATA_ONLY 1 + + DECLARE_PROC_ADDR(ntdll.dll, NTSTATUS, NTAPI, NtFlushBuffersFileEx, + HANDLE FileHandle, ULONG Flags, PVOID Parameters, ULONG ParameterSize, + PIO_STATUS_BLOCK IoStatusBlock); + + if (!INIT_PROC_ADDR(NtFlushBuffersFileEx)) { + errno = ENOSYS; + return -1; + } + + memset(&io_status, 0, sizeof(io_status)); + if (NtFlushBuffersFileEx((HANDLE)_get_osfhandle(fd), FLUSH_FLAGS_FILE_DATA_ONLY, + NULL, 0, &io_status)) { + errno = EINVAL; + return -1; + } + + return 0; +} diff --git a/compat/winansi.c b/compat/winansi.c index 936a80a5f0..3abe8dd5a2 100644 --- a/compat/winansi.c +++ b/compat/winansi.c @@ -4,11 +4,6 @@ #undef NOGDI -/* - * Including the appropriate header file for RtlGenRandom causes MSVC to see a - * redefinition of types in an incompatible way when including headers below. 
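
The terminal changes above tighten the contract spelled out in compat/terminal.h: a successful save_term() must always be paired with restore_term(), and clearing ICANON now sets VMIN/VTIME explicitly. Below is a minimal POSIX termios sketch of that save/modify/restore sequence under those assumptions; it leaves out the sigchain handling and the Windows console path and is only an illustration, not git's implementation.

#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios old_term, raw;
	int fd = open("/dev/tty", O_RDWR);
	char ch;

	if (fd < 0)
		return 1;
	if (tcgetattr(fd, &old_term) < 0) {
		close(fd);
		return 1;	/* nothing saved, so nothing to restore */
	}

	raw = old_term;
	raw.c_lflag &= ~(ICANON | ECHO);
	raw.c_cc[VMIN] = 1;	/* read returns after one byte ... */
	raw.c_cc[VTIME] = 0;	/* ... and never times out */
	if (tcsetattr(fd, TCSAFLUSH, &raw) < 0) {
		close(fd);
		return 1;
	}

	if (read(fd, &ch, 1) == 1)
		printf("\ngot key %d\n", ch);

	tcsetattr(fd, TCSAFLUSH, &old_term);	/* the matched restore */
	close(fd);
	return 0;
}
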
- */ -#undef HAVE_RTLGENRANDOM #include "../git-compat-util.h" #include <wingdi.h> #include <winreg.h> @@ -1323,6 +1323,80 @@ static int git_parse_maybe_bool_text(const char *value) return -1; } +static const struct fsync_component_name { + const char *name; + enum fsync_component component_bits; +} fsync_component_names[] = { + { "loose-object", FSYNC_COMPONENT_LOOSE_OBJECT }, + { "pack", FSYNC_COMPONENT_PACK }, + { "pack-metadata", FSYNC_COMPONENT_PACK_METADATA }, + { "commit-graph", FSYNC_COMPONENT_COMMIT_GRAPH }, + { "index", FSYNC_COMPONENT_INDEX }, + { "objects", FSYNC_COMPONENTS_OBJECTS }, + { "reference", FSYNC_COMPONENT_REFERENCE }, + { "derived-metadata", FSYNC_COMPONENTS_DERIVED_METADATA }, + { "committed", FSYNC_COMPONENTS_COMMITTED }, + { "added", FSYNC_COMPONENTS_ADDED }, + { "all", FSYNC_COMPONENTS_ALL }, +}; + +static enum fsync_component parse_fsync_components(const char *var, const char *string) +{ + enum fsync_component current = FSYNC_COMPONENTS_DEFAULT; + enum fsync_component positive = 0, negative = 0; + + while (string) { + int i; + size_t len; + const char *ep; + int negated = 0; + int found = 0; + + string = string + strspn(string, ", \t\n\r"); + ep = strchrnul(string, ','); + len = ep - string; + if (!strcmp(string, "none")) { + current = FSYNC_COMPONENT_NONE; + goto next_name; + } + + if (*string == '-') { + negated = 1; + string++; + len--; + if (!len) + warning(_("invalid value for variable %s"), var); + } + + if (!len) + break; + + for (i = 0; i < ARRAY_SIZE(fsync_component_names); ++i) { + const struct fsync_component_name *n = &fsync_component_names[i]; + + if (strncmp(n->name, string, len)) + continue; + + found = 1; + if (negated) + negative |= n->component_bits; + else + positive |= n->component_bits; + } + + if (!found) { + char *component = xstrndup(string, len); + warning(_("ignoring unknown core.fsync component '%s'"), component); + free(component); + } + +next_name: + string = ep; + } + + return (current & ~negative) | positive; +} + int git_parse_maybe_bool(const char *value) { int v = git_parse_maybe_bool_text(value); @@ -1600,7 +1674,28 @@ static int git_default_core_config(const char *var, const char *value, void *cb) return 0; } + if (!strcmp(var, "core.fsync")) { + if (!value) + return config_error_nonbool(var); + fsync_components = parse_fsync_components(var, value); + return 0; + } + + if (!strcmp(var, "core.fsyncmethod")) { + if (!value) + return config_error_nonbool(var); + if (!strcmp(value, "fsync")) + fsync_method = FSYNC_METHOD_FSYNC; + else if (!strcmp(value, "writeout-only")) + fsync_method = FSYNC_METHOD_WRITEOUT_ONLY; + else + warning(_("ignoring unknown core.fsyncMethod value '%s'"), value); + + } + if (!strcmp(var, "core.fsyncobjectfiles")) { + if (fsync_object_files < 0) + warning(_("core.fsyncobjectfiles is deprecated; use core.fsync instead")); fsync_object_files = git_config_bool(var, value); return 0; } @@ -1654,6 +1749,17 @@ static int git_default_core_config(const char *var, const char *value, void *cb) return platform_core_config(var, value, cb); } +static int git_default_sparse_config(const char *var, const char *value) +{ + if (!strcmp(var, "sparse.expectfilesoutsideofpatterns")) { + sparse_expect_files_outside_of_patterns = git_config_bool(var, value); + return 0; + } + + /* Add other config variables here and to Documentation/config/sparse.txt. 
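
parse_fsync_components() above walks a comma-separated list, treats a leading '-' as negation, lets "none" reset the running value, and combines the result as (current & ~negative) | positive. The sketch below reproduces that shape with hypothetical component bits and a portable strcspn() in place of strchrnul(); it illustrates the parsing idea and is not the actual core.fsync parser.

#include <stdio.h>
#include <string.h>

/* Hypothetical component bits, standing in for FSYNC_COMPONENT_*. */
enum component {
	COMP_LOOSE = 1 << 0,
	COMP_PACK  = 1 << 1,
	COMP_INDEX = 1 << 2,
	COMP_DEFAULT = COMP_LOOSE | COMP_PACK,
};

static const struct { const char *name; unsigned bits; } names[] = {
	{ "loose-object", COMP_LOOSE },
	{ "pack", COMP_PACK },
	{ "index", COMP_INDEX },
	{ "all", COMP_LOOSE | COMP_PACK | COMP_INDEX },
};

/* Parse "pack,-loose-object,index" style lists: start from a default,
 * collect positive and negative bits, let "none" clear the base. */
static unsigned parse_components(const char *string)
{
	unsigned current = COMP_DEFAULT, positive = 0, negative = 0;

	while (*string) {
		size_t i, len;
		int negated = 0, found = 0;

		string += strspn(string, ", \t\n\r");
		len = strcspn(string, ",");
		if (!len)
			break;
		if (len == 4 && !strncmp(string, "none", 4)) {
			current = 0;
			goto next;
		}
		if (*string == '-') {
			negated = 1;
			string++;
			len--;
		}
		for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
			if (strlen(names[i].name) != len ||
			    strncmp(names[i].name, string, len))
				continue;
			found = 1;
			if (negated)
				negative |= names[i].bits;
			else
				positive |= names[i].bits;
		}
		if (!found)
			fprintf(stderr, "ignoring unknown component '%.*s'\n",
				(int)len, string);
next:
		string += len;
	}
	return (current & ~negative) | positive;
}

int main(void)
{
	printf("%#x\n", parse_components("pack,-loose-object,index"));
	return 0;
}
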
*/ + return 0; +} + static int git_default_i18n_config(const char *var, const char *value) { if (!strcmp(var, "i18n.commitencoding")) @@ -1785,6 +1891,9 @@ int git_default_config(const char *var, const char *value, void *cb) return 0; } + if (starts_with(var, "sparse.")) + return git_default_sparse_config(var, value); + /* Add other config variables here and to Documentation/config.txt. */ return 0; } diff --git a/config.mak.uname b/config.mak.uname index 4352ea39e9..f6ac966c08 100644 --- a/config.mak.uname +++ b/config.mak.uname @@ -57,6 +57,7 @@ ifeq ($(uname_S),Linux) HAVE_CLOCK_MONOTONIC = YesPlease # -lrt is needed for clock_gettime on glibc <= 2.16 NEEDS_LIBRT = YesPlease + HAVE_SYNC_FILE_RANGE = YesPlease HAVE_GETDELIM = YesPlease FREAD_READS_DIRECTORIES = UnfortunatelyYes BASIC_CFLAGS += -DHAVE_SYSINFO @@ -463,6 +464,7 @@ endif CFLAGS = BASIC_CFLAGS = -nologo -I. -Icompat/vcbuild/include -DWIN32 -D_CONSOLE -DHAVE_STRING_H -D_CRT_SECURE_NO_WARNINGS -D_CRT_NONSTDC_NO_DEPRECATE COMPAT_OBJS = compat/msvc.o compat/winansi.o \ + compat/win32/flush.o \ compat/win32/path-utils.o \ compat/win32/pthread.o compat/win32/syslog.o \ compat/win32/trace2_win32_process_info.o \ @@ -640,6 +642,7 @@ ifeq ($(uname_S),MINGW) COMPAT_CFLAGS += -DSTRIP_EXTENSION=\".exe\" COMPAT_OBJS += compat/mingw.o compat/winansi.o \ compat/win32/trace2_win32_process_info.o \ + compat/win32/flush.o \ compat/win32/path-utils.o \ compat/win32/pthread.o compat/win32/syslog.o \ compat/win32/dirent.o @@ -727,7 +730,6 @@ vcxproj: git diff-index --cached --quiet HEAD -- # Make .vcxproj files and add them - unset QUIET_GEN QUIET_BUILT_IN; \ perl contrib/buildsystems/generate -g Vcxproj git add -f git.sln {*,*/lib,t/helper/*}/*.vcxproj diff --git a/configure.ac b/configure.ac index 5ee25ec95c..6bd6bef1c4 100644 --- a/configure.ac +++ b/configure.ac @@ -1082,6 +1082,14 @@ AC_COMPILE_IFELSE([CLOCK_MONOTONIC_SRC], [AC_MSG_RESULT([no]) HAVE_CLOCK_MONOTONIC=]) GIT_CONF_SUBST([HAVE_CLOCK_MONOTONIC]) + +# +# Define HAVE_SYNC_FILE_RANGE=YesPlease if sync_file_range is available. +GIT_CHECK_FUNC(sync_file_range, + [HAVE_SYNC_FILE_RANGE=YesPlease], + [HAVE_SYNC_FILE_RANGE]) +GIT_CONF_SUBST([HAVE_SYNC_FILE_RANGE]) + # # Define NO_SETITIMER if you don't have setitimer. 
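
The sparse.* handling above is a two-level dispatch: git_default_config() routes any key starting with "sparse." to git_default_sparse_config(), which then matches the full name and parses a boolean. A libc-only sketch of that routing follows; starts_with(), parse_bool() and the global flag are simplified stand-ins for git's helpers, not their real implementations.

#include <stdio.h>
#include <string.h>

static int expect_files_outside_of_patterns; /* stand-in for the new global */

/* libc-only stand-in for git's starts_with() */
static int starts_with(const char *str, const char *prefix)
{
	return !strncmp(str, prefix, strlen(prefix));
}

/* crude stand-in for git_config_bool(): accepts "true" or "1" only */
static int parse_bool(const char *value)
{
	return !strcmp(value, "true") || !strcmp(value, "1");
}

/* second level: only keys under "sparse." ever reach this handler */
static int sparse_config(const char *var, const char *value)
{
	if (!strcmp(var, "sparse.expectfilesoutsideofpatterns")) {
		expect_files_outside_of_patterns = parse_bool(value);
		return 0;
	}
	return 0;	/* unknown sparse.* keys are ignored */
}

/* first level: route whole config sections by prefix */
static int default_config(const char *var, const char *value)
{
	if (starts_with(var, "sparse."))
		return sparse_config(var, value);
	return 0;
}

int main(void)
{
	default_config("sparse.expectfilesoutsideofpatterns", "true");
	printf("expect files outside of patterns: %d\n",
	       expect_files_outside_of_patterns);
	return 0;
}
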
GIT_CHECK_FUNC(setitimer, diff --git a/contrib/buildsystems/CMakeLists.txt b/contrib/buildsystems/CMakeLists.txt index e44232f85d..3a9e624166 100644 --- a/contrib/buildsystems/CMakeLists.txt +++ b/contrib/buildsystems/CMakeLists.txt @@ -261,10 +261,18 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Windows") NOGDI OBJECT_CREATION_MODE=1 __USE_MINGW_ANSI_STDIO=0 USE_NED_ALLOCATOR OVERRIDE_STRDUP MMAP_PREVENTS_DELETE USE_WIN32_MMAP UNICODE _UNICODE HAVE_WPGMPTR ENSURE_MSYSTEM_IS_SET HAVE_RTLGENRANDOM) - list(APPEND compat_SOURCES compat/mingw.c compat/winansi.c compat/win32/path-utils.c - compat/win32/pthread.c compat/win32mmap.c compat/win32/syslog.c - compat/win32/trace2_win32_process_info.c compat/win32/dirent.c - compat/nedmalloc/nedmalloc.c compat/strdup.c) + list(APPEND compat_SOURCES + compat/mingw.c + compat/winansi.c + compat/win32/flush.c + compat/win32/path-utils.c + compat/win32/pthread.c + compat/win32mmap.c + compat/win32/syslog.c + compat/win32/trace2_win32_process_info.c + compat/win32/dirent.c + compat/nedmalloc/nedmalloc.c + compat/strdup.c) set(NO_UNIX_SOCKETS 1) elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux") diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash index 49a328aa8a..ba5c395d2d 100644 --- a/contrib/completion/git-completion.bash +++ b/contrib/completion/git-completion.bash @@ -2890,6 +2890,10 @@ _git_restore () --*) __gitcomp_builtin restore ;; + *) + if __git rev-parse --verify --quiet HEAD >/dev/null; then + __git_complete_index_file "--modified" + fi esac } diff --git a/contrib/scalar/Makefile b/contrib/scalar/Makefile index 231b1ee179..5e86d78e19 100644 --- a/contrib/scalar/Makefile +++ b/contrib/scalar/Makefile @@ -1,18 +1,8 @@ -QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir -QUIET_SUBDIR1 = - -ifneq ($(findstring s,$(MAKEFLAGS)),s) -ifndef V - QUIET_GEN = @echo ' ' GEN $@; - QUIET_SUBDIR0 = +@subdir= - QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \ - $(MAKE) $(PRINT_DIR) -C $$subdir -else - export V -endif -endif - -all: +# The default target of this Makefile is... +all:: + +# Import tree-wide shared Makefile behavior and libraries +include ../../shared.mak include ../../config.mak.uname -include ../../config.mak.autogen diff --git a/contrib/scalar/t/Makefile b/contrib/scalar/t/Makefile index 6170672bb3..01e82e56d1 100644 --- a/contrib/scalar/t/Makefile +++ b/contrib/scalar/t/Makefile @@ -1,3 +1,6 @@ +# Import tree-wide shared Makefile behavior and libraries +include ../../../shared.mak + # Run scalar tests # # Copyright (c) 2005,2021 Junio C Hamano, Johannes Schindelin @@ -1159,7 +1159,7 @@ static int ident_to_worktree(const char *src, size_t len, /* are we "faking" in place editing ? 
*/ if (src == buf->buf) to_free = strbuf_detach(buf, NULL); - hash_object_file(the_hash_algo, src, len, "blob", &oid); + hash_object_file(the_hash_algo, src, len, OBJ_BLOB, &oid); strbuf_grow(buf, len + cnt * (the_hash_algo->hexsz + 3)); for (;;) { diff --git a/credential.c b/credential.c index e7240f3f63..f6389a5068 100644 --- a/credential.c +++ b/credential.c @@ -130,6 +130,7 @@ static void credential_apply_config(struct credential *c) git_config(urlmatch_config_entry, &config); string_list_clear(&config.vars, 1); free(normalized_url); + urlmatch_config_release(&config); strbuf_release(&url); c->configured = 1; diff --git a/csum-file.c b/csum-file.c index 26e8a6df44..59ef3398ca 100644 --- a/csum-file.c +++ b/csum-file.c @@ -58,7 +58,8 @@ static void free_hashfile(struct hashfile *f) free(f); } -int finalize_hashfile(struct hashfile *f, unsigned char *result, unsigned int flags) +int finalize_hashfile(struct hashfile *f, unsigned char *result, + enum fsync_component component, unsigned int flags) { int fd; @@ -69,7 +70,7 @@ int finalize_hashfile(struct hashfile *f, unsigned char *result, unsigned int fl if (flags & CSUM_HASH_IN_STREAM) flush(f, f->buffer, the_hash_algo->rawsz); if (flags & CSUM_FSYNC) - fsync_or_die(f->fd, f->name); + fsync_component_or_die(component, f->fd, f->name); if (flags & CSUM_CLOSE) { if (close(f->fd)) die_errno("%s: sha1 file error on close", f->name); diff --git a/csum-file.h b/csum-file.h index 291215b34e..0d29f528fb 100644 --- a/csum-file.h +++ b/csum-file.h @@ -1,6 +1,7 @@ #ifndef CSUM_FILE_H #define CSUM_FILE_H +#include "cache.h" #include "hash.h" struct progress; @@ -38,7 +39,7 @@ int hashfile_truncate(struct hashfile *, struct hashfile_checkpoint *); struct hashfile *hashfd(int fd, const char *name); struct hashfile *hashfd_check(const char *name); struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp); -int finalize_hashfile(struct hashfile *, unsigned char *, unsigned int); +int finalize_hashfile(struct hashfile *, unsigned char *, enum fsync_component, unsigned int); void hashwrite(struct hashfile *, const void *, unsigned int); void hashflush(struct hashfile *f); void crc32_begin(struct hashfile *); @@ -800,6 +800,14 @@ static void append_emitted_diff_symbol(struct diff_options *o, f->line = e->line ? 
xmemdupz(e->line, e->len) : NULL; } +static void free_emitted_diff_symbols(struct emitted_diff_symbols *e) +{ + if (!e) + return; + free(e->buf); + free(e); +} + struct moved_entry { const struct emitted_diff_symbol *es; struct moved_entry *next_line; @@ -2227,7 +2235,7 @@ static void free_diff_words_data(struct emit_callback *ecbdata) { if (ecbdata->diff_words) { diff_words_flush(ecbdata); - free (ecbdata->diff_words->opt->emitted_symbols); + free_emitted_diff_symbols(ecbdata->diff_words->opt->emitted_symbols); free (ecbdata->diff_words->opt); free (ecbdata->diff_words->minus.text.ptr); free (ecbdata->diff_words->minus.orig); diff --git a/diffcore-rename.c b/diffcore-rename.c index bebd4ed6a4..c0422d9e70 100644 --- a/diffcore-rename.c +++ b/diffcore-rename.c @@ -261,7 +261,7 @@ static unsigned int hash_filespec(struct repository *r, if (diff_populate_filespec(r, filespec, NULL)) return 0; hash_object_file(r->hash_algo, filespec->data, filespec->size, - "blob", &filespec->oid); + OBJ_BLOB, &filespec->oid); } return oidhash(&filespec->oid); } @@ -1113,7 +1113,7 @@ static int add_patterns(const char *fname, const char *base, int baselen, &istate->cache[pos]->oid); else hash_object_file(the_hash_algo, buf, size, - "blob", &oid_stat->oid); + OBJ_BLOB, &oid_stat->oid); fill_stat_data(&oid_stat->stat, &st); oid_stat->valid = 1; } @@ -1463,10 +1463,11 @@ static int path_in_sparse_checkout_1(const char *path, const char *end, *slash; /* - * We default to accepting a path if there are no patterns or - * they are of the wrong type. + * We default to accepting a path if the path is empty, there are no + * patterns, or the patterns are of the wrong type. */ - if (init_sparse_checkout_patterns(istate) || + if (!*path || + init_sparse_checkout_patterns(istate) || (require_cone_mode && !istate->sparse_checkout_patterns->use_cone_patterns)) return 1; @@ -2781,7 +2782,8 @@ void remove_untracked_cache(struct index_state *istate) static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *dir, int base_len, - const struct pathspec *pathspec) + const struct pathspec *pathspec, + struct index_state *istate) { struct untracked_cache_dir *root; static int untracked_cache_disabled = -1; @@ -2845,8 +2847,11 @@ static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *d return NULL; } - if (!dir->untracked->root) + if (!dir->untracked->root) { + /* Untracked cache existed but is not initialized; fix that */ FLEX_ALLOC_STR(dir->untracked->root, name, ""); + istate->cache_changed |= UNTRACKED_CHANGED; + } /* Validate $GIT_DIR/info/exclude and core.excludesfile */ root = dir->untracked->root; @@ -2916,7 +2921,7 @@ int read_directory(struct dir_struct *dir, struct index_state *istate, return dir->nr; } - untracked = validate_untracked_cache(dir, len, pathspec); + untracked = validate_untracked_cache(dir, len, pathspec, istate); if (!untracked) /* * make sure untracked cache code path is disabled, diff --git a/environment.c b/environment.c index fd0501e77a..f27e235548 100644 --- a/environment.c +++ b/environment.c @@ -42,8 +42,10 @@ const char *git_attributes_file; const char *git_hooks_path; int zlib_compression_level = Z_BEST_SPEED; int pack_compression_level = Z_DEFAULT_COMPRESSION; -int fsync_object_files; +int fsync_object_files = -1; int use_fsync = -1; +enum fsync_method fsync_method = FSYNC_METHOD_DEFAULT; +enum fsync_component fsync_components = FSYNC_COMPONENTS_DEFAULT; size_t packed_git_window_size = DEFAULT_PACKED_GIT_WINDOW_SIZE; size_t packed_git_limit = 
DEFAULT_PACKED_GIT_LIMIT; size_t delta_base_cache_limit = 96 * 1024 * 1024; @@ -70,6 +72,7 @@ char *notes_ref_name; int grafts_replace_parents = 1; int core_apply_sparse_checkout; int core_sparse_checkout_cone; +int sparse_expect_files_outside_of_patterns; int merge_log_config = -1; int precomposed_unicode = -1; /* see probe_utf8_pathname_composition() */ unsigned long pack_size_limit_cfg; diff --git a/git-compat-util.h b/git-compat-util.h index 876907b9df..0892e209a2 100644 --- a/git-compat-util.h +++ b/git-compat-util.h @@ -197,12 +197,6 @@ #endif #include <windows.h> #define GIT_WINDOWS_NATIVE -#ifdef HAVE_RTLGENRANDOM -/* This is required to get access to RtlGenRandom. */ -#define SystemFunction036 NTAPI SystemFunction036 -#include <NTSecAPI.h> -#undef SystemFunction036 -#endif #endif #include <unistd.h> @@ -534,9 +528,7 @@ void warning_errno(const char *err, ...) __attribute__((format (printf, 1, 2))); /* * Let callers be aware of the constant return value; this can help * gcc with -Wuninitialized analysis. We restrict this trick to gcc, though, - * because some compilers may not support variadic macros. Since we're only - * trying to help gcc, anyway, it's OK; other compilers will fall back to - * using the function as usual. + * because other compilers may be confused by this. */ #if defined(__GNUC__) static inline int const_error(void) @@ -1258,25 +1250,37 @@ static inline int regexec_buf(const regex_t *preg, const char *buf, size_t size, #endif #endif -/* - * This is always defined as a first step towards making the use of variadic - * macros unconditional. If it causes compilation problems on your platform, - * please report it to the Git mailing list at git@vger.kernel.org. - */ -#define HAVE_VARIADIC_MACROS 1 - /* usage.c: only to be used for testing BUG() implementation (see test-tool) */ extern int BUG_exit_code; -#ifdef HAVE_VARIADIC_MACROS __attribute__((format (printf, 3, 4))) NORETURN void BUG_fl(const char *file, int line, const char *fmt, ...); #define BUG(...) BUG_fl(__FILE__, __LINE__, __VA_ARGS__) + +#ifdef __APPLE__ +#define FSYNC_METHOD_DEFAULT FSYNC_METHOD_WRITEOUT_ONLY #else -__attribute__((format (printf, 1, 2))) NORETURN -void BUG(const char *fmt, ...); +#define FSYNC_METHOD_DEFAULT FSYNC_METHOD_FSYNC #endif +enum fsync_action { + FSYNC_WRITEOUT_ONLY, + FSYNC_HARDWARE_FLUSH +}; + +/* + * Issues an fsync against the specified file according to the specified mode. + * + * FSYNC_WRITEOUT_ONLY attempts to use interfaces available on some operating + * systems to flush the OS cache without issuing a flush command to the storage + * controller. If those interfaces are unavailable, the function fails with + * ENOSYS. + * + * FSYNC_HARDWARE_FLUSH does an OS writeout and hardware flush to ensure that + * changes are durable. It is not expected to fail. + */ +int git_fsync(int fd, enum fsync_action action); + /* * Preserves errno, prints a message, but gives no warning for ENOENT. * Returns 0 on success, which includes trying to unlink an object that does diff --git a/git-submodule.sh b/git-submodule.sh index 87772ac891..aa8bdfca9d 100755 --- a/git-submodule.sh +++ b/git-submodule.sh @@ -247,20 +247,6 @@ cmd_deinit() git ${wt_prefix:+-C "$wt_prefix"} submodule--helper deinit ${GIT_QUIET:+--quiet} ${force:+--force} ${deinit_all:+--all} -- "$@" } -# usage: fetch_in_submodule <module_path> [<depth>] [<sha1>] -# Because arguments are positional, use an empty string to omit <depth> -# but include <sha1>. 
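
The git_fsync() comment above distinguishes a cheap writeout from a real hardware flush, with the default method differing per platform and sync_file_range() detected by the build. The sketch below is one plausible mapping of those two levels onto platform primitives, assuming the Linux sync_file_range() and macOS F_FULLFSYNC interfaces named in this series; it is illustrative and is neither compat/win32/flush.c nor git's actual wrapper.

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

enum fsync_action { WRITEOUT_ONLY, HARDWARE_FLUSH };

/*
 * Illustrative mapping: on Linux, sync_file_range() pushes dirty pages
 * to the device without a cache-flush command; on macOS, F_FULLFSYNC
 * asks the drive to flush its write cache; otherwise fall back.
 */
static int flush_fd(int fd, enum fsync_action action)
{
	if (action == WRITEOUT_ONLY) {
#ifdef __linux__
		return sync_file_range(fd, 0, 0,
				       SYNC_FILE_RANGE_WAIT_BEFORE |
				       SYNC_FILE_RANGE_WRITE |
				       SYNC_FILE_RANGE_WAIT_AFTER);
#else
		errno = ENOSYS;	/* no cheap writeout primitive here */
		return -1;
#endif
	}
#ifdef __APPLE__
	return fcntl(fd, F_FULLFSYNC);	/* plain fsync() does not flush the disk cache */
#else
	return fsync(fd);
#endif
}

int main(void)
{
	int fd = open("demo.tmp", O_CREAT | O_WRONLY | O_TRUNC, 0644);

	if (fd < 0)
		return 1;
	if (write(fd, "hello\n", 6) != 6)
		perror("write");
	if (flush_fd(fd, WRITEOUT_ONLY) < 0)
		perror("writeout-only flush");
	if (flush_fd(fd, HARDWARE_FLUSH) < 0)
		perror("hardware flush");
	close(fd);
	return 0;
}
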
-fetch_in_submodule () ( - sanitize_submodule_env && - cd "$1" && - if test $# -eq 3 - then - echo "$3" | git fetch ${GIT_QUIET:+--quiet} --stdin ${2:+"$2"} - else - git fetch ${GIT_QUIET:+--quiet} ${2:+"$2"} - fi -) - # # Update each submodule path to correct revision, using clone and checkout as needed # @@ -370,19 +356,11 @@ cmd_update() shift done - if test -n "$filter" && test "$init" != "1" - then - usage - fi - - if test -n "$init" - then - cmd_init "--" "$@" || return - fi - { - git submodule--helper update-clone ${GIT_QUIET:+--quiet} \ + git ${wt_prefix:+-C "$wt_prefix"} submodule--helper update-clone \ + ${GIT_QUIET:+--quiet} \ ${progress:+"--progress"} \ + ${init:+--init} \ ${wt_prefix:+--prefix "$wt_prefix"} \ ${prefix:+--recursive-prefix "$prefix"} \ ${update:+--update "$update"} \ @@ -402,33 +380,11 @@ cmd_update() do die_if_unmatched "$quickabort" "$sha1" - git submodule--helper ensure-core-worktree "$sm_path" || exit 1 - displaypath=$(git submodule--helper relative-path "$prefix$sm_path" "$wt_prefix") - if test $just_cloned -eq 1 + if test $just_cloned -eq 0 then - subsha1= - else just_cloned= - subsha1=$(sanitize_submodule_env; cd "$sm_path" && - git rev-parse --verify HEAD) || - die "fatal: $(eval_gettext "Unable to find current revision in submodule path '\$displaypath'")" - fi - - if test -n "$remote" - then - branch=$(git submodule--helper remote-branch "$sm_path") - if test -z "$nofetch" - then - # Fetch remote before determining tracking $sha1 - fetch_in_submodule "$sm_path" $depth || - die "fatal: $(eval_gettext "Unable to fetch in submodule path '\$sm_path'")" - fi - remote_name=$(sanitize_submodule_env; cd "$sm_path" && git submodule--helper print-default-remote) - sha1=$(sanitize_submodule_env; cd "$sm_path" && - git rev-parse --verify "${remote_name}/${branch}") || - die "fatal: $(eval_gettext "Unable to find current \${remote_name}/\${branch} revision in submodule path '\$sm_path'")" fi out=$(git submodule--helper run-update-procedure \ @@ -441,7 +397,7 @@ cmd_update() ${update:+--update "$update"} \ ${prefix:+--recursive-prefix "$prefix"} \ ${sha1:+--oid "$sha1"} \ - ${subsha1:+--suboid "$subsha1"} \ + ${remote:+--remote} \ "--" \ "$sm_path") diff --git a/gitweb/gitweb.perl b/gitweb/gitweb.perl index fbd1c20a23..606b50104c 100755 --- a/gitweb/gitweb.perl +++ b/gitweb/gitweb.perl @@ -4213,8 +4213,7 @@ sub git_header_html { my %opts = @_; my $title = get_page_title(); - my $content_type = get_content_type_html(); - print $cgi->header(-type=>$content_type, -charset => 'utf-8', + print $cgi->header(-type=>get_content_type_html(), -charset => 'utf-8', -status=> $status, -expires => $expires) unless ($opts{'-no_http_header'}); my $mod_perl_version = $ENV{'MOD_PERL'} ? 
" $ENV{'MOD_PERL'}" : ''; @@ -4225,7 +4224,6 @@ sub git_header_html { <!-- git web interface version $version, (C) 2005-2006, Kay Sievers <kay.sievers\@vrfy.org>, Christian Gierke --> <!-- git core binaries version $git_version --> <head> -<meta http-equiv="content-type" content="$content_type; charset=utf-8"/> <meta name="generator" content="gitweb/$version git/$git_version$mod_perl_version"/> <meta name="robots" content="index, nofollow"/> <title>$title</title> diff --git a/gpg-interface.c b/gpg-interface.c index aa50224e67..280f1fa1a5 100644 --- a/gpg-interface.c +++ b/gpg-interface.c @@ -934,6 +934,7 @@ static int sign_buffer_gpg(struct strbuf *buffer, struct strbuf *signature, struct child_process gpg = CHILD_PROCESS_INIT; int ret; size_t bottom; + const char *cp; struct strbuf gpg_status = STRBUF_INIT; strvec_pushl(&gpg.args, @@ -953,7 +954,13 @@ static int sign_buffer_gpg(struct strbuf *buffer, struct strbuf *signature, signature, 1024, &gpg_status, 0); sigchain_pop(SIGPIPE); - ret |= !strstr(gpg_status.buf, "\n[GNUPG:] SIG_CREATED "); + for (cp = gpg_status.buf; + cp && (cp = strstr(cp, "[GNUPG:] SIG_CREATED ")); + cp++) { + if (cp == gpg_status.buf || cp[-1] == '\n') + break; /* found */ + } + ret |= !cp; strbuf_release(&gpg_status); if (ret) return error(_("gpg failed to sign the data")); @@ -124,7 +124,9 @@ static void print_cmd_by_category(const struct category_description *catdesc, uint32_t mask = catdesc[i].category; const char *desc = catdesc[i].desc; - printf("\n%s\n", _(desc)); + if (i) + putchar('\n'); + puts(_(desc)); print_command_list(cmds, mask, longest); } free(cmds); @@ -317,7 +319,7 @@ void list_commands(struct cmdnames *main_cmds, struct cmdnames *other_cmds) } if (other_cmds->cnt) { - printf_ln(_("git commands available from elsewhere on your $PATH")); + puts(_("git commands available from elsewhere on your $PATH")); putchar('\n'); pretty_print_cmdnames(other_cmds, colopts); putchar('\n'); @@ -327,6 +329,7 @@ void list_commands(struct cmdnames *main_cmds, struct cmdnames *other_cmds) void list_common_cmds_help(void) { puts(_("These are common Git commands used in various situations:")); + putchar('\n'); print_cmd_by_category(common_categories, NULL); } @@ -432,15 +435,10 @@ static int get_alias(const char *var, const char *value, void *data) return 0; } -void list_all_cmds_help(void) +static void list_all_cmds_help_external_commands(void) { struct string_list others = STRING_LIST_INIT_DUP; - struct string_list alias_list = STRING_LIST_INIT_DUP; - struct cmdname_help *aliases; - int i, longest; - - printf_ln(_("See 'git help <command>' to read about a specific subcommand")); - print_cmd_by_category(main_categories, &longest); + int i; list_all_other_cmds(&others); if (others.nr) @@ -448,6 +446,13 @@ void list_all_cmds_help(void) for (i = 0; i < others.nr; i++) printf(" %s\n", others.items[i].string); string_list_clear(&others, 0); +} + +static void list_all_cmds_help_aliases(int longest) +{ + struct string_list alias_list = STRING_LIST_INIT_DUP; + struct cmdname_help *aliases; + int i; git_config(get_alias, &alias_list); string_list_sort(&alias_list); @@ -473,6 +478,20 @@ void list_all_cmds_help(void) string_list_clear(&alias_list, 1); } +void list_all_cmds_help(int show_external_commands, int show_aliases) +{ + int longest; + + puts(_("See 'git help <command>' to read about a specific subcommand")); + putchar('\n'); + print_cmd_by_category(main_categories, &longest); + + if (show_external_commands) + list_all_cmds_help_external_commands(); + if (show_aliases) + 
list_all_cmds_help_aliases(longest); +} + int is_in_cmdlist(struct cmdnames *c, const char *s) { int i; @@ -20,7 +20,7 @@ static inline void mput_char(char c, unsigned int num) } void list_common_cmds_help(void); -void list_all_cmds_help(void); +void list_all_cmds_help(int show_external_commands, int show_aliases); void list_guides_help(void); void list_all_main_cmds(struct string_list *list); diff --git a/http-push.c b/http-push.c index 3309aaf004..f0c044dcf7 100644 --- a/http-push.c +++ b/http-push.c @@ -363,7 +363,7 @@ static void start_put(struct transfer_request *request) git_zstream stream; unpacked = read_object_file(&request->obj->oid, &type, &len); - hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX , type_name(type), (uintmax_t)len) + 1; + hdrlen = format_object_header(hdr, sizeof(hdr), type, len); /* Set it up */ git_deflate_init(&stream, zlib_compression_level); diff --git a/list-objects-filter-options.c b/list-objects-filter-options.c index fd8d59f653..f02d8df142 100644 --- a/list-objects-filter-options.c +++ b/list-objects-filter-options.c @@ -40,22 +40,7 @@ const char *list_object_filter_config_name(enum list_objects_filter_choice c) BUG("list_object_filter_config_name: invalid argument '%d'", c); } -/* - * Parse value of the argument to the "filter" keyword. - * On the command line this looks like: - * --filter=<arg> - * and in the pack protocol as: - * "filter" SP <arg> - * - * The filter keyword will be used by many commands. - * See Documentation/rev-list-options.txt for allowed values for <arg>. - * - * Capture the given arg as the "filter_spec". This can be forwarded to - * subordinate commands when necessary (although it's better to pass it through - * expand_list_objects_filter_spec() first). We also "intern" the arg for the - * convenience of the current command. - */ -static int gently_parse_list_objects_filter( +int gently_parse_list_objects_filter( struct list_objects_filter_options *filter_options, const char *arg, struct strbuf *errbuf) @@ -415,3 +400,22 @@ void partial_clone_get_default_filter_spec( &errbuf); strbuf_release(&errbuf); } + +void list_objects_filter_copy( + struct list_objects_filter_options *dest, + const struct list_objects_filter_options *src) +{ + int i; + struct string_list_item *item; + + /* Copy everything. We will overwrite the pointers shortly. */ + memcpy(dest, src, sizeof(struct list_objects_filter_options)); + + string_list_init_dup(&dest->filter_spec); + for_each_string_list_item(item, &src->filter_spec) + string_list_append(&dest->filter_spec, item->string); + + ALLOC_ARRAY(dest->sub, dest->sub_alloc); + for (i = 0; i < src->sub_nr; i++) + list_objects_filter_copy(&dest->sub[i], &src->sub[i]); +} diff --git a/list-objects-filter-options.h b/list-objects-filter-options.h index da5b6737e2..2eb6c98394 100644 --- a/list-objects-filter-options.h +++ b/list-objects-filter-options.h @@ -72,6 +72,26 @@ struct list_objects_filter_options { /* Normalized command line arguments */ #define CL_ARG__FILTER "filter" +/* + * Parse value of the argument to the "filter" keyword. + * On the command line this looks like: + * --filter=<arg> + * and in the pack protocol as: + * "filter" SP <arg> + * + * The filter keyword will be used by many commands. + * See Documentation/rev-list-options.txt for allowed values for <arg>. + * + * Capture the given arg as the "filter_spec". This can be forwarded to + * subordinate commands when necessary (although it's better to pass it through + * expand_list_objects_filter_spec() first). 
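
list_objects_filter_copy() above copies the flat fields wholesale with memcpy() and then re-creates every owned allocation, recursing into sub-filters so the copy can be freed independently of the source. The toy struct below (field names invented for the illustration) shows the same deep-copy discipline with plain libc.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-in for a filter spec: owns its strings and its sub-filters. */
struct filter {
	char **spec;
	size_t spec_nr;
	struct filter *sub;
	size_t sub_nr;
};

static char *dup_string(const char *s)
{
	size_t len = strlen(s) + 1;
	char *p = malloc(len);

	if (!p)
		abort();
	return memcpy(p, s, len);
}

/*
 * Deep copy in the style of list_objects_filter_copy(): copy the flat
 * fields wholesale, then re-create every owned pointer.
 */
static void filter_copy(struct filter *dst, const struct filter *src)
{
	size_t i;

	memcpy(dst, src, sizeof(*dst));	/* pointers are overwritten below */

	dst->spec = calloc(src->spec_nr ? src->spec_nr : 1, sizeof(*dst->spec));
	for (i = 0; i < src->spec_nr; i++)
		dst->spec[i] = dup_string(src->spec[i]);

	dst->sub = calloc(src->sub_nr ? src->sub_nr : 1, sizeof(*dst->sub));
	for (i = 0; i < src->sub_nr; i++)
		filter_copy(&dst->sub[i], &src->sub[i]);	/* recurse into combined filters */
}

int main(void)
{
	char *spec[] = { "blob:none" };
	struct filter src = { spec, 1, NULL, 0 };
	struct filter dst;

	filter_copy(&dst, &src);
	printf("copied spec: %s\n", dst.spec[0]);
	free(dst.spec[0]);
	free(dst.spec);
	free(dst.sub);
	return 0;
}
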
We also "intern" the arg for the + * convenience of the current command. + */ +int gently_parse_list_objects_filter( + struct list_objects_filter_options *filter_options, + const char *arg, + struct strbuf *errbuf); + void list_objects_filter_die_if_populated( struct list_objects_filter_options *filter_options); @@ -132,4 +152,8 @@ void partial_clone_get_default_filter_spec( struct list_objects_filter_options *filter_options, const char *remote); +void list_objects_filter_copy( + struct list_objects_filter_options *dest, + const struct list_objects_filter_options *src); + #endif /* LIST_OBJECTS_FILTER_OPTIONS_H */ diff --git a/list-objects.c b/list-objects.c index 2f623f8211..250d9de41c 100644 --- a/list-objects.c +++ b/list-objects.c @@ -21,6 +21,23 @@ struct traversal_context { struct filter *filter; }; +static void show_commit(struct traversal_context *ctx, + struct commit *commit) +{ + if (!ctx->show_commit) + return; + ctx->show_commit(commit, ctx->show_data); +} + +static void show_object(struct traversal_context *ctx, + struct object *object, + const char *name) +{ + if (!ctx->show_object) + return; + ctx->show_object(object, name, ctx->show_data); +} + static void process_blob(struct traversal_context *ctx, struct blob *blob, struct strbuf *path, @@ -60,7 +77,7 @@ static void process_blob(struct traversal_context *ctx, if (r & LOFR_MARK_SEEN) obj->flags |= SEEN; if (r & LOFR_DO_SHOW) - ctx->show_object(obj, path->buf, ctx->show_data); + show_object(ctx, obj, path->buf); strbuf_setlen(path, pathlen); } @@ -194,7 +211,7 @@ static void process_tree(struct traversal_context *ctx, if (r & LOFR_MARK_SEEN) obj->flags |= SEEN; if (r & LOFR_DO_SHOW) - ctx->show_object(obj, base->buf, ctx->show_data); + show_object(ctx, obj, base->buf); if (base->len) strbuf_addch(base, '/'); @@ -210,7 +227,7 @@ static void process_tree(struct traversal_context *ctx, if (r & LOFR_MARK_SEEN) obj->flags |= SEEN; if (r & LOFR_DO_SHOW) - ctx->show_object(obj, base->buf, ctx->show_data); + show_object(ctx, obj, base->buf); strbuf_setlen(base, baselen); free_tree_buffer(tree); @@ -228,7 +245,7 @@ static void process_tag(struct traversal_context *ctx, if (r & LOFR_MARK_SEEN) tag->object.flags |= SEEN; if (r & LOFR_DO_SHOW) - ctx->show_object(&tag->object, name, ctx->show_data); + show_object(ctx, &tag->object, name); } static void mark_edge_parents_uninteresting(struct commit *commit, @@ -402,7 +419,7 @@ static void do_traverse(struct traversal_context *ctx) if (r & LOFR_MARK_SEEN) commit->object.flags |= SEEN; if (r & LOFR_DO_SHOW) - ctx->show_commit(commit, ctx->show_data); + show_commit(ctx, commit); if (ctx->revs->tree_blobs_in_commit_order) /* @@ -416,35 +433,25 @@ static void do_traverse(struct traversal_context *ctx) strbuf_release(&csp); } -void traverse_commit_list(struct rev_info *revs, - show_commit_fn show_commit, - show_object_fn show_object, - void *show_data) -{ - struct traversal_context ctx; - ctx.revs = revs; - ctx.show_commit = show_commit; - ctx.show_object = show_object; - ctx.show_data = show_data; - ctx.filter = NULL; - do_traverse(&ctx); -} - void traverse_commit_list_filtered( - struct list_objects_filter_options *filter_options, struct rev_info *revs, show_commit_fn show_commit, show_object_fn show_object, void *show_data, struct oidset *omitted) { - struct traversal_context ctx; + struct traversal_context ctx = { + .revs = revs, + .show_object = show_object, + .show_commit = show_commit, + .show_data = show_data, + }; + + if (revs->filter.choice) + ctx.filter = 
list_objects_filter__init(omitted, &revs->filter); - ctx.revs = revs; - ctx.show_object = show_object; - ctx.show_commit = show_commit; - ctx.show_data = show_data; - ctx.filter = list_objects_filter__init(omitted, filter_options); do_traverse(&ctx); - list_objects_filter__free(ctx.filter); + + if (ctx.filter) + list_objects_filter__free(ctx.filter); } diff --git a/list-objects.h b/list-objects.h index a952680e46..9eaf4de844 100644 --- a/list-objects.h +++ b/list-objects.h @@ -7,7 +7,6 @@ struct rev_info; typedef void (*show_commit_fn)(struct commit *, void *); typedef void (*show_object_fn)(struct object *, const char *, void *); -void traverse_commit_list(struct rev_info *, show_commit_fn, show_object_fn, void *); typedef void (*show_edge_fn)(struct commit *); void mark_edges_uninteresting(struct rev_info *revs, @@ -18,11 +17,20 @@ struct oidset; struct list_objects_filter_options; void traverse_commit_list_filtered( - struct list_objects_filter_options *filter_options, struct rev_info *revs, show_commit_fn show_commit, show_object_fn show_object, void *show_data, struct oidset *omitted); +static inline void traverse_commit_list( + struct rev_info *revs, + show_commit_fn show_commit, + show_object_fn show_object, + void *show_data) +{ + traverse_commit_list_filtered(revs, show_commit, + show_object, show_data, NULL); +} + #endif /* LIST_OBJECTS_H */ diff --git a/log-tree.c b/log-tree.c index 25165e2a91..38e5cccc1a 100644 --- a/log-tree.c +++ b/log-tree.c @@ -565,7 +565,7 @@ static int show_one_mergetag(struct commit *commit, struct strbuf signature = STRBUF_INIT; hash_object_file(the_hash_algo, extra->value, extra->len, - type_name(OBJ_TAG), &oid); + OBJ_TAG, &oid); tag = lookup_tag(the_repository, &oid); if (!tag) return -1; /* error message already given */ @@ -43,8 +43,8 @@ static void free_mailmap_info(void *p, const char *s) static void free_mailmap_entry(void *p, const char *s) { struct mailmap_entry *me = (struct mailmap_entry *)p; - debug_mm("mailmap: removing entries for <%s>, with %d sub-entries\n", - s, me->namemap.nr); + debug_mm("mailmap: removing entries for <%s>, with %"PRIuMAX" sub-entries\n", + s, (uintmax_t)me->namemap.nr); debug_mm("mailmap: - simple: '%s' <%s>\n", debug_str(me->name), debug_str(me->email)); @@ -250,7 +250,8 @@ int read_mailmap(struct string_list *map) void clear_mailmap(struct string_list *map) { - debug_mm("mailmap: clearing %d entries...\n", map->nr); + debug_mm("mailmap: clearing %"PRIuMAX" entries...\n", + (uintmax_t)map->nr); map->strdup_strings = 1; string_list_clear_func(map, free_mailmap_entry); debug_mm("mailmap: cleared\n"); diff --git a/match-trees.c b/match-trees.c index df413989fa..49398e599f 100644 --- a/match-trees.c +++ b/match-trees.c @@ -235,7 +235,7 @@ static int splice_tree(const struct object_id *oid1, const char *prefix, rewrite_with = oid2; } hashcpy(rewrite_here, rewrite_with->hash); - status = write_object_file(buf, sz, tree_type, result); + status = write_object_file(buf, sz, OBJ_TREE, result); free(buf); return status; } diff --git a/merge-ort.c b/merge-ort.c index ff739d4b36..8545354daf 100644 --- a/merge-ort.c +++ b/merge-ort.c @@ -639,8 +639,9 @@ static void path_msg(struct merge_options *opt, if (opt->record_conflict_msgs_as_headers && omittable_hint) return; /* Do not record mere hints in headers */ - if (opt->record_conflict_msgs_as_headers && opt->priv->call_depth) - return; /* Do not record inner merge issues in headers */ + if (opt->priv->call_depth && opt->verbosity < 5) + return; /* Ignore messages from inner 
merges */ + sb = strmap_get(&opt->priv->output, path); if (!sb) { sb = xmalloc(sizeof(*sb)); @@ -1937,7 +1938,7 @@ static int handle_content_merge(struct merge_options *opt, if (!ret && write_object_file(result_buf.ptr, result_buf.size, - blob_type, &result->oid)) + OBJ_BLOB, &result->oid)) ret = err(opt, _("Unable to add %s to database"), path); @@ -3391,7 +3392,7 @@ static void write_tree(struct object_id *result_oid, } /* Write this object file out, and record in result_oid */ - write_object_file(buf.buf, buf.len, tree_type, result_oid); + write_object_file(buf.buf, buf.len, OBJ_TREE, result_oid); strbuf_release(&buf); } @@ -4063,8 +4064,8 @@ static void process_entries(struct merge_options *opt, trace2_region_enter("merge", "process_entries cleanup", opt->repo); if (dir_metadata.offsets.nr != 1 || (uintptr_t)dir_metadata.offsets.items[0].util != 0) { - printf("dir_metadata.offsets.nr = %d (should be 1)\n", - dir_metadata.offsets.nr); + printf("dir_metadata.offsets.nr = %"PRIuMAX" (should be 1)\n", + (uintmax_t)dir_metadata.offsets.nr); printf("dir_metadata.offsets.items[0].util = %u (should be 0)\n", (unsigned)(uintptr_t)dir_metadata.offsets.items[0].util); fflush(stdout); diff --git a/merge-recursive.c b/merge-recursive.c index 9ec1e6d043..1ee6364e8b 100644 --- a/merge-recursive.c +++ b/merge-recursive.c @@ -1376,7 +1376,7 @@ static int merge_mode_and_contents(struct merge_options *opt, if (!ret && write_object_file(result_buf.ptr, result_buf.size, - blob_type, &result->blob.oid)) + OBJ_BLOB, &result->blob.oid)) ret = err(opt, _("Unable to add %s to database"), a->path); @@ -1438,7 +1438,8 @@ static int write_midx_internal(const char *object_dir, write_midx_header(f, get_num_chunks(cf), ctx.nr - dropped_packs); write_chunkfile(cf, &ctx); - finalize_hashfile(f, midx_hash, CSUM_FSYNC | CSUM_HASH_IN_STREAM); + finalize_hashfile(f, midx_hash, FSYNC_COMPONENT_PACK_METADATA, + CSUM_FSYNC | CSUM_HASH_IN_STREAM); free_chunkfile(cf); if (flags & MIDX_WRITE_REV_INDEX && diff --git a/notes-cache.c b/notes-cache.c index 2473314d68..9dfd251a81 100644 --- a/notes-cache.c +++ b/notes-cache.c @@ -92,7 +92,7 @@ int notes_cache_put(struct notes_cache *c, struct object_id *key_oid, { struct object_id value_oid; - if (write_object_file(data, size, "blob", &value_oid) < 0) + if (write_object_file(data, size, OBJ_BLOB, &value_oid) < 0) return -1; return add_note(&c->tree, key_oid, &value_oid, NULL); } diff --git a/notes-merge.c b/notes-merge.c index 878b6c571b..b4cc594a79 100644 --- a/notes-merge.c +++ b/notes-merge.c @@ -113,6 +113,7 @@ static struct notes_merge_pair *find_notes_merge_pair_pos( } static struct object_id uninitialized = { + .hash = "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" \ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" }; @@ -675,7 +675,7 @@ static int tree_write_stack_finish_subtree(struct tree_write_stack *tws) ret = tree_write_stack_finish_subtree(n); if (ret) return ret; - ret = write_object_file(n->buf.buf, n->buf.len, tree_type, &s); + ret = write_object_file(n->buf.buf, n->buf.len, OBJ_TREE, &s); if (ret) return ret; strbuf_release(&n->buf); @@ -836,7 +836,7 @@ int combine_notes_concatenate(struct object_id *cur_oid, free(new_msg); /* create a new blob object from buf */ - ret = write_object_file(buf, buf_len, blob_type, cur_oid); + ret = write_object_file(buf, buf_len, OBJ_BLOB, cur_oid); free(buf); return ret; } @@ -916,7 +916,7 @@ int combine_notes_cat_sort_uniq(struct object_id *cur_oid, string_list_join_lines_helper, &buf)) goto out; - ret = write_object_file(buf.buf, buf.len, 
blob_type, cur_oid); + ret = write_object_file(buf.buf, buf.len, OBJ_BLOB, cur_oid); out: strbuf_release(&buf); @@ -1192,7 +1192,7 @@ int write_notes_tree(struct notes_tree *t, struct object_id *result) ret = for_each_note(t, flags, write_each_note, &cb_data) || write_each_non_note_until(NULL, &cb_data) || tree_write_stack_finish_subtree(&root) || - write_object_file(root.buf.buf, root.buf.len, tree_type, result); + write_object_file(root.buf.buf, root.buf.len, OBJ_TREE, result); strbuf_release(&root.buf); return ret; } diff --git a/object-file.c b/object-file.c index 03bd6a3baf..b254bc50d7 100644 --- a/object-file.c +++ b/object-file.c @@ -274,10 +274,11 @@ static struct cached_object { static int cached_object_nr, cached_object_alloc; static struct cached_object empty_tree = { - { EMPTY_TREE_SHA1_BIN_LITERAL }, - OBJ_TREE, - "", - 0 + .oid = { + .hash = EMPTY_TREE_SHA1_BIN_LITERAL, + }, + .type = OBJ_TREE, + .buf = "", }; static struct cached_object *find_cached_object(const struct object_id *oid) @@ -1049,35 +1050,50 @@ void *xmmap(void *start, size_t length, return ret; } -/* - * With an in-core object data in "map", rehash it to make sure the - * object name actually matches "oid" to detect object corruption. - * With "map" == NULL, try reading the object named with "oid" using - * the streaming interface and rehash it to do the same. - */ +static int format_object_header_literally(char *str, size_t size, + const char *type, size_t objsize) +{ + return xsnprintf(str, size, "%s %"PRIuMAX, type, (uintmax_t)objsize) + 1; +} + +int format_object_header(char *str, size_t size, enum object_type type, + size_t objsize) +{ + const char *name = type_name(type); + + if (!name) + BUG("could not get a type name for 'enum object_type' value %d", type); + + return format_object_header_literally(str, size, name, objsize); +} + int check_object_signature(struct repository *r, const struct object_id *oid, - void *map, unsigned long size, const char *type, - struct object_id *real_oidp) + void *buf, unsigned long size, + enum object_type type) { - struct object_id tmp; - struct object_id *real_oid = real_oidp ? real_oidp : &tmp; + struct object_id real_oid; + + hash_object_file(r->hash_algo, buf, size, type, &real_oid); + + return !oideq(oid, &real_oid) ? -1 : 0; +} + +int stream_object_signature(struct repository *r, const struct object_id *oid) +{ + struct object_id real_oid; + unsigned long size; enum object_type obj_type; struct git_istream *st; git_hash_ctx c; char hdr[MAX_HEADER_LEN]; int hdrlen; - if (map) { - hash_object_file(r->hash_algo, map, size, type, real_oid); - return !oideq(oid, real_oid) ? -1 : 0; - } - st = open_istream(r, oid, &obj_type, &size, NULL); if (!st) return -1; /* Generate the header */ - hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX , type_name(obj_type), (uintmax_t)size) + 1; + hdrlen = format_object_header(hdr, sizeof(hdr), obj_type, size); /* Sha1.. */ r->hash_algo->init_fn(&c); @@ -1094,9 +1110,9 @@ int check_object_signature(struct repository *r, const struct object_id *oid, break; r->hash_algo->update_fn(&c, buf, readlen); } - r->hash_algo->final_oid_fn(real_oid, &c); + r->hash_algo->final_oid_fn(&real_oid, &c); close_istream(st); - return !oideq(oid, real_oid) ? -1 : 0; + return !oideq(oid, &real_oid) ? 
-1 : 0; } int git_open_cloexec(const char *name, int flags) @@ -1662,7 +1678,7 @@ int pretend_object_file(void *buf, unsigned long len, enum object_type type, { struct cached_object *co; - hash_object_file(the_hash_algo, buf, len, type_name(type), oid); + hash_object_file(the_hash_algo, buf, len, type, oid); if (has_object_file_with_flags(oid, OBJECT_INFO_QUICK | OBJECT_INFO_SKIP_FETCH_OBJECT) || find_cached_object(oid)) return 0; @@ -1722,16 +1738,15 @@ void *read_object_file_extended(struct repository *r, void *read_object_with_reference(struct repository *r, const struct object_id *oid, - const char *required_type_name, + enum object_type required_type, unsigned long *size, struct object_id *actual_oid_return) { - enum object_type type, required_type; + enum object_type type; void *buffer; unsigned long isize; struct object_id actual_oid; - required_type = type_from_string(required_type_name); oidcpy(&actual_oid, oid); while (1) { int ref_length = -1; @@ -1769,21 +1784,40 @@ void *read_object_with_reference(struct repository *r, } } +static void hash_object_body(const struct git_hash_algo *algo, git_hash_ctx *c, + const void *buf, unsigned long len, + struct object_id *oid, + char *hdr, int *hdrlen) +{ + algo->init_fn(c); + algo->update_fn(c, hdr, *hdrlen); + algo->update_fn(c, buf, len); + algo->final_oid_fn(oid, c); +} + static void write_object_file_prepare(const struct git_hash_algo *algo, const void *buf, unsigned long len, - const char *type, struct object_id *oid, + enum object_type type, struct object_id *oid, char *hdr, int *hdrlen) { git_hash_ctx c; /* Generate the header */ - *hdrlen = xsnprintf(hdr, *hdrlen, "%s %"PRIuMAX , type, (uintmax_t)len)+1; + *hdrlen = format_object_header(hdr, *hdrlen, type, len); /* Sha1.. */ - algo->init_fn(&c); - algo->update_fn(&c, hdr, *hdrlen); - algo->update_fn(&c, buf, len); - algo->final_oid_fn(oid, &c); + hash_object_body(algo, &c, buf, len, oid, hdr, hdrlen); +} + +static void write_object_file_prepare_literally(const struct git_hash_algo *algo, + const void *buf, unsigned long len, + const char *type, struct object_id *oid, + char *hdr, int *hdrlen) +{ + git_hash_ctx c; + + *hdrlen = format_object_header_literally(hdr, *hdrlen, type, len); + hash_object_body(algo, &c, buf, len, oid, hdr, hdrlen); } /* @@ -1836,24 +1870,36 @@ static int write_buffer(int fd, const void *buf, size_t len) return 0; } -int hash_object_file(const struct git_hash_algo *algo, const void *buf, - unsigned long len, const char *type, - struct object_id *oid) +static void hash_object_file_literally(const struct git_hash_algo *algo, + const void *buf, unsigned long len, + const char *type, struct object_id *oid) { char hdr[MAX_HEADER_LEN]; int hdrlen = sizeof(hdr); - write_object_file_prepare(algo, buf, len, type, oid, hdr, &hdrlen); - return 0; + + write_object_file_prepare_literally(algo, buf, len, type, oid, hdr, &hdrlen); +} + +void hash_object_file(const struct git_hash_algo *algo, const void *buf, + unsigned long len, enum object_type type, + struct object_id *oid) +{ + hash_object_file_literally(algo, buf, len, type_name(type), oid); } /* Finalize a file on disk, and close it. 
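
The split above leaves check_object_signature() hashing an in-core buffer while stream_object_signature() feeds the same hash incrementally from the streaming interface, which is why large blobs can be verified without slurping them into memory. The sketch below demonstrates that chunked updates and a one-shot hash agree, using 64-bit FNV-1a purely as a self-contained stand-in for the repository hash.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in hash: 64-bit FNV-1a, updatable in arbitrary pieces. */
static uint64_t fnv1a_update(uint64_t h, const void *data, size_t len)
{
	const unsigned char *p = data;
	size_t i;

	for (i = 0; i < len; i++) {
		h ^= p[i];
		h *= 0x100000001b3ULL;
	}
	return h;
}

#define FNV_SEED 0xcbf29ce484222325ULL

int main(void)
{
	static const char payload[] =
		"pretend this is a large blob that we do not want in memory";
	uint64_t whole, streamed = FNV_SEED;
	size_t off, chunk = 8;

	/* "check_object_signature" style: the whole buffer is in core. */
	whole = fnv1a_update(FNV_SEED, payload, sizeof(payload) - 1);

	/* "stream_object_signature" style: feed the hash in small chunks,
	 * as the streaming interface does for oversized blobs. */
	for (off = 0; off < sizeof(payload) - 1; off += chunk) {
		size_t n = sizeof(payload) - 1 - off;

		if (n > chunk)
			n = chunk;
		streamed = fnv1a_update(streamed, payload + off, n);
	}

	printf("one-shot  %016llx\nstreamed  %016llx\n%s\n",
	       (unsigned long long)whole, (unsigned long long)streamed,
	       whole == streamed ? "ids match" : "MISMATCH");
	return 0;
}
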
*/ static void close_loose_object(int fd) { - if (!the_repository->objects->odb->will_destroy) { - if (fsync_object_files) - fsync_or_die(fd, "loose object file"); - } + if (the_repository->objects->odb->will_destroy) + goto out; + + if (fsync_object_files > 0) + fsync_or_die(fd, "loose object file"); + else + fsync_component_or_die(FSYNC_COMPONENT_LOOSE_OBJECT, fd, + "loose object file"); +out: if (close(fd) != 0) die_errno(_("error when closing loose object file")); } @@ -1998,7 +2044,7 @@ static int freshen_packed_object(const struct object_id *oid) } int write_object_file_flags(const void *buf, unsigned long len, - const char *type, struct object_id *oid, + enum object_type type, struct object_id *oid, unsigned flags) { char hdr[MAX_HEADER_LEN]; @@ -2014,9 +2060,9 @@ int write_object_file_flags(const void *buf, unsigned long len, return write_loose_object(oid, hdr, hdrlen, buf, len, 0, flags); } -int hash_object_file_literally(const void *buf, unsigned long len, - const char *type, struct object_id *oid, - unsigned flags) +int write_object_file_literally(const void *buf, unsigned long len, + const char *type, struct object_id *oid, + unsigned flags) { char *header; int hdrlen, status = 0; @@ -2024,8 +2070,8 @@ int hash_object_file_literally(const void *buf, unsigned long len, /* type string, SP, %lu of the length plus NUL must fit this */ hdrlen = strlen(type) + MAX_HEADER_LEN; header = xmalloc(hdrlen); - write_object_file_prepare(the_hash_algo, buf, len, type, oid, header, - &hdrlen); + write_object_file_prepare_literally(the_hash_algo, buf, len, type, + oid, header, &hdrlen); if (!(flags & HASH_WRITE_OBJECT)) goto cleanup; @@ -2052,7 +2098,7 @@ int force_object_loose(const struct object_id *oid, time_t mtime) buf = read_object(the_repository, oid, &type, &len); if (!buf) return error(_("cannot read object for %s"), oid_to_hex(oid)); - hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX , type_name(type), (uintmax_t)len) + 1; + hdrlen = format_object_header(hdr, sizeof(hdr), type, len); ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime, 0); free(buf); @@ -2118,7 +2164,8 @@ static int index_mem(struct index_state *istate, enum object_type type, const char *path, unsigned flags) { - int ret, re_allocated = 0; + int ret = 0; + int re_allocated = 0; int write_object = flags & HASH_WRITE_OBJECT; if (!type) @@ -2145,10 +2192,9 @@ static int index_mem(struct index_state *istate, } if (write_object) - ret = write_object_file(buf, size, type_name(type), oid); + ret = write_object_file(buf, size, type, oid); else - ret = hash_object_file(the_hash_algo, buf, size, - type_name(type), oid); + hash_object_file(the_hash_algo, buf, size, type, oid); if (re_allocated) free(buf); return ret; @@ -2160,7 +2206,7 @@ static int index_stream_convert_blob(struct index_state *istate, const char *path, unsigned flags) { - int ret; + int ret = 0; const int write_object = flags & HASH_WRITE_OBJECT; struct strbuf sbuf = STRBUF_INIT; @@ -2171,11 +2217,11 @@ static int index_stream_convert_blob(struct index_state *istate, get_conv_flags(flags)); if (write_object) - ret = write_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB), + ret = write_object_file(sbuf.buf, sbuf.len, OBJ_BLOB, oid); else - ret = hash_object_file(the_hash_algo, sbuf.buf, sbuf.len, - type_name(OBJ_BLOB), oid); + hash_object_file(the_hash_algo, sbuf.buf, sbuf.len, OBJ_BLOB, + oid); strbuf_release(&sbuf); return ret; } @@ -2294,8 +2340,8 @@ int index_path(struct index_state *istate, struct object_id *oid, return 
error_errno("readlink(\"%s\")", path); if (!(flags & HASH_WRITE_OBJECT)) hash_object_file(the_hash_algo, sb.buf, sb.len, - blob_type, oid); - else if (write_object_file(sb.buf, sb.len, blob_type, oid)) + OBJ_BLOB, oid); + else if (write_object_file(sb.buf, sb.len, OBJ_BLOB, oid)) rc = error(_("%s: failed to insert into database"), path); strbuf_release(&sb); break; @@ -2599,9 +2645,10 @@ int read_loose_object(const char *path, git_inflate_end(&stream); goto out; } - if (check_object_signature(the_repository, expected_oid, + hash_object_file_literally(the_repository->hash_algo, *contents, *size, - oi->type_name->buf, real_oid)) + oi->type_name->buf, real_oid); + if (!oideq(expected_oid, real_oid)) goto out; } diff --git a/object-store.h b/object-store.h index 6f89482df0..bd2322ed8c 100644 --- a/object-store.h +++ b/object-store.h @@ -245,22 +245,22 @@ static inline void *repo_read_object_file(struct repository *r, /* Read and unpack an object file into memory, write memory to an object file */ int oid_object_info(struct repository *r, const struct object_id *, unsigned long *); -int hash_object_file(const struct git_hash_algo *algo, const void *buf, - unsigned long len, const char *type, - struct object_id *oid); +void hash_object_file(const struct git_hash_algo *algo, const void *buf, + unsigned long len, enum object_type type, + struct object_id *oid); int write_object_file_flags(const void *buf, unsigned long len, - const char *type, struct object_id *oid, + enum object_type type, struct object_id *oid, unsigned flags); static inline int write_object_file(const void *buf, unsigned long len, - const char *type, struct object_id *oid) + enum object_type type, struct object_id *oid) { return write_object_file_flags(buf, len, type, oid, 0); } -int hash_object_file_literally(const void *buf, unsigned long len, - const char *type, struct object_id *oid, - unsigned flags); +int write_object_file_literally(const void *buf, unsigned long len, + const char *type, struct object_id *oid, + unsigned flags); /* * Add an object file to the in-memory object store, without writing it @@ -331,6 +331,14 @@ int repo_has_object_file_with_flags(struct repository *r, */ int has_loose_object_nonlocal(const struct object_id *); +/** + * format_object_header() is a thin wrapper around s xsnprintf() that + * writes the initial "<type> <obj-len>" part of the loose object + * header. It returns the size that snprintf() returns + 1. 
+ */ +int format_object_header(char *str, size_t size, enum object_type type, + size_t objsize); + void assert_oid_type(const struct object_id *oid, enum object_type expect); /* @@ -279,7 +279,7 @@ struct object *parse_object(struct repository *r, const struct object_id *oid) if ((obj && obj->type == OBJ_BLOB && repo_has_object_file(r, oid)) || (!obj && repo_has_object_file(r, oid) && oid_object_info(r, oid, NULL) == OBJ_BLOB)) { - if (check_object_signature(r, repl, NULL, 0, NULL, NULL) < 0) { + if (stream_object_signature(r, repl) < 0) { error(_("hash mismatch %s"), oid_to_hex(oid)); return NULL; } @@ -289,8 +289,7 @@ struct object *parse_object(struct repository *r, const struct object_id *oid) buffer = repo_read_object_file(r, oid, &type, &size); if (buffer) { - if (check_object_signature(r, repl, buffer, size, - type_name(type), NULL) < 0) { + if (check_object_signature(r, repl, buffer, size, type) < 0) { free(buffer); error(_("hash mismatch %s"), oid_to_hex(repl)); return NULL; @@ -75,7 +75,7 @@ struct object_array { * builtin/fsck.c: 0--3 * builtin/gc.c: 0 * builtin/index-pack.c: 2021 - * builtin/reflog.c: 10--12 + * reflog.c: 10--12 * builtin/show-branch.c: 0-------------------------------------------26 * builtin/unpack-objects.c: 2021 */ diff --git a/pack-bitmap-write.c b/pack-bitmap-write.c index cab3eaa2ac..cf681547f2 100644 --- a/pack-bitmap-write.c +++ b/pack-bitmap-write.c @@ -719,7 +719,8 @@ void bitmap_writer_finish(struct pack_idx_entry **index, if (options & BITMAP_OPT_HASH_CACHE) write_hash_cache(f, index, index_nr); - finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); + finalize_hashfile(f, NULL, FSYNC_COMPONENT_PACK_METADATA, + CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); if (adjust_shared_perm(tmp_file.buf)) die_errno("unable to make temporary bitmap file readable"); diff --git a/pack-bitmap.c b/pack-bitmap.c index 9c666cdb8b..97909d48da 100644 --- a/pack-bitmap.c +++ b/pack-bitmap.c @@ -739,8 +739,7 @@ static int add_commit_to_bitmap(struct bitmap_index *bitmap_git, static struct bitmap *find_objects(struct bitmap_index *bitmap_git, struct rev_info *revs, struct object_list *roots, - struct bitmap *seen, - struct list_objects_filter_options *filter) + struct bitmap *seen) { struct bitmap *base = NULL; int needs_walk = 0; @@ -823,9 +822,9 @@ static struct bitmap *find_objects(struct bitmap_index *bitmap_git, show_data.bitmap_git = bitmap_git; show_data.base = base; - traverse_commit_list_filtered(filter, revs, - show_commit, show_object, - &show_data, NULL); + traverse_commit_list(revs, + show_commit, show_object, + &show_data); revs->include_check = NULL; revs->include_check_obj = NULL; @@ -1219,7 +1218,6 @@ static int can_filter_bitmap(struct list_objects_filter_options *filter) } struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs, - struct list_objects_filter_options *filter, int filter_provided_objects) { unsigned int i; @@ -1240,7 +1238,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs, if (revs->prune) return NULL; - if (!can_filter_bitmap(filter)) + if (!can_filter_bitmap(&revs->filter)) return NULL; /* try to open a bitmapped pack, but don't parse it yet @@ -1297,8 +1295,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs, if (haves) { revs->ignore_missing_links = 1; - haves_bitmap = find_objects(bitmap_git, revs, haves, NULL, - filter); + haves_bitmap = find_objects(bitmap_git, revs, haves, NULL); reset_revision_walk(); revs->ignore_missing_links = 0; @@ -1306,8 +1303,7 @@ struct 
bitmap_index *prepare_bitmap_walk(struct rev_info *revs, BUG("failed to perform bitmap walk"); } - wants_bitmap = find_objects(bitmap_git, revs, wants, haves_bitmap, - filter); + wants_bitmap = find_objects(bitmap_git, revs, wants, haves_bitmap); if (!wants_bitmap) BUG("failed to perform bitmap walk"); @@ -1315,8 +1311,10 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs, if (haves_bitmap) bitmap_and_not(wants_bitmap, haves_bitmap); - filter_bitmap(bitmap_git, (filter && filter_provided_objects) ? NULL : wants, - wants_bitmap, filter); + filter_bitmap(bitmap_git, + (revs->filter.choice && filter_provided_objects) ? NULL : wants, + wants_bitmap, + &revs->filter); bitmap_git->result = wants_bitmap; bitmap_git->haves = haves_bitmap; diff --git a/pack-bitmap.h b/pack-bitmap.h index 19a63fa1ab..3d3ddd7734 100644 --- a/pack-bitmap.h +++ b/pack-bitmap.h @@ -10,7 +10,6 @@ struct commit; struct repository; struct rev_info; -struct list_objects_filter_options; static const char BITMAP_IDX_SIGNATURE[] = {'B', 'I', 'T', 'M'}; @@ -54,7 +53,6 @@ void test_bitmap_walk(struct rev_info *revs); int test_bitmap_commits(struct repository *r); int test_bitmap_hashes(struct repository *r); struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs, - struct list_objects_filter_options *filter, int filter_provided_objects); uint32_t midx_preferred_pack(struct bitmap_index *bitmap_git); int reuse_partial_packfile_from_bitmap(struct bitmap_index *, diff --git a/pack-check.c b/pack-check.c index 3f418e3a6a..bfb593ba72 100644 --- a/pack-check.c +++ b/pack-check.c @@ -127,7 +127,7 @@ static int verify_packfile(struct repository *r, if (type == OBJ_BLOB && big_file_threshold <= size) { /* - * Let check_object_signature() check it with + * Let stream_object_signature() check it with * the streaming interface; no point slurping * the data in-core only to discard. */ @@ -142,8 +142,11 @@ static int verify_packfile(struct repository *r, err = error("cannot unpack %s from %s at offset %"PRIuMAX"", oid_to_hex(&oid), p->pack_name, (uintmax_t)entries[i].offset); - else if (check_object_signature(r, &oid, data, size, - type_name(type), NULL)) + else if (data && check_object_signature(r, &oid, data, size, + type) < 0) + err = error("packed %s from %s is corrupt", + oid_to_hex(&oid), p->pack_name); + else if (!data && stream_object_signature(r, &oid) < 0) err = error("packed %s from %s is corrupt", oid_to_hex(&oid), p->pack_name); else if (fn) { diff --git a/pack-write.c b/pack-write.c index a5846f3a34..51812cb129 100644 --- a/pack-write.c +++ b/pack-write.c @@ -159,9 +159,9 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec } hashwrite(f, sha1, the_hash_algo->rawsz); - finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_CLOSE | - ((opts->flags & WRITE_IDX_VERIFY) - ? 0 : CSUM_FSYNC)); + finalize_hashfile(f, NULL, FSYNC_COMPONENT_PACK_METADATA, + CSUM_HASH_IN_STREAM | CSUM_CLOSE | + ((opts->flags & WRITE_IDX_VERIFY) ? 0 : CSUM_FSYNC)); return index_name; } @@ -281,8 +281,9 @@ const char *write_rev_file_order(const char *rev_name, if (rev_name && adjust_shared_perm(rev_name) < 0) die(_("failed to make %s readable"), rev_name); - finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_CLOSE | - ((flags & WRITE_IDX_VERIFY) ? 0 : CSUM_FSYNC)); + finalize_hashfile(f, NULL, FSYNC_COMPONENT_PACK_METADATA, + CSUM_HASH_IN_STREAM | CSUM_CLOSE | + ((flags & WRITE_IDX_VERIFY) ? 
0 : CSUM_FSYNC)); return rev_name; } @@ -390,7 +391,7 @@ void fixup_pack_header_footer(int pack_fd, the_hash_algo->final_fn(partial_pack_hash, &old_hash_ctx); the_hash_algo->final_fn(new_pack_hash, &new_hash_ctx); write_or_die(pack_fd, new_pack_hash, the_hash_algo->rawsz); - fsync_or_die(pack_fd, pack_name); + fsync_component_or_die(FSYNC_COMPONENT_PACK, pack_fd, pack_name); } char *index_pack_lockfile(int ip_out, int *is_well_formed) @@ -169,20 +169,6 @@ void report_linked_checkout_garbage(void); return r->cached_paths.var; \ } -struct path_cache { - const char *squash_msg; - const char *merge_msg; - const char *merge_rr; - const char *merge_mode; - const char *merge_head; - const char *merge_autostash; - const char *auto_merge; - const char *fetch_head; - const char *shallow; -}; - -#define PATH_CACHE_INIT { 0 } - const char *git_path_squash_msg(struct repository *r); const char *git_path_merge_msg(struct repository *r); const char *git_path_merge_rr(struct repository *r); diff --git a/range-diff.c b/range-diff.c index 30a4de5c2d..b72eb9fdbe 100644 --- a/range-diff.c +++ b/range-diff.c @@ -40,6 +40,7 @@ static int read_patches(const char *range, struct string_list *list, char *line, *current_filename = NULL; ssize_t len; size_t size; + int ret = -1; strvec_pushl(&cp.args, "log", "--no-color", "-p", "--no-merges", "--reverse", "--date-order", "--decorate=no", @@ -68,10 +69,10 @@ static int read_patches(const char *range, struct string_list *list, if (strbuf_read(&contents, cp.out, 0) < 0) { error_errno(_("could not read `log` output")); finish_command(&cp); - return -1; + goto cleanup; } if (finish_command(&cp)) - return -1; + goto cleanup; line = contents.buf; size = contents.len; @@ -95,12 +96,9 @@ static int read_patches(const char *range, struct string_list *list, CALLOC_ARRAY(util, 1); if (get_oid(p, &util->oid)) { error(_("could not parse commit '%s'"), p); - free(util); - free(current_filename); + FREE_AND_NULL(util); string_list_clear(list, 1); - strbuf_release(&buf); - strbuf_release(&contents); - return -1; + goto cleanup; } util->matching = -1; in_header = 1; @@ -111,11 +109,8 @@ static int read_patches(const char *range, struct string_list *list, error(_("could not parse first line of `log` output: " "did not start with 'commit ': '%s'"), line); - free(current_filename); string_list_clear(list, 1); - strbuf_release(&buf); - strbuf_release(&contents); - return -1; + goto cleanup; } if (starts_with(line, "diff --git")) { @@ -136,12 +131,9 @@ static int read_patches(const char *range, struct string_list *list, if (len < 0) { error(_("could not parse git header '%.*s'"), orig_len, line); - free(util); - free(current_filename); + FREE_AND_NULL(util); string_list_clear(list, 1); - strbuf_release(&buf); - strbuf_release(&contents); - return -1; + goto cleanup; } strbuf_addstr(&buf, " ## "); if (patch.is_new > 0) @@ -165,6 +157,7 @@ static int read_patches(const char *range, struct string_list *list, patch.old_mode, patch.new_mode); strbuf_addstr(&buf, " ##"); + release_patch(&patch); } else if (in_header) { if (starts_with(line, "Author: ")) { strbuf_addstr(&buf, " ## Metadata ##\n"); @@ -218,6 +211,9 @@ static int read_patches(const char *range, struct string_list *list, strbuf_addch(&buf, '\n'); util->diffsize++; } + + ret = 0; +cleanup: strbuf_release(&contents); if (util) @@ -225,7 +221,7 @@ static int read_patches(const char *range, struct string_list *list, strbuf_release(&buf); free(current_filename); - return 0; + return ret; } static int patch_util_cmp(const void *dummy, 
const struct patch_util *a, diff --git a/reachable.c b/reachable.c index 84e3d0d75e..b9f4ad886e 100644 --- a/reachable.c +++ b/reachable.c @@ -205,7 +205,7 @@ void mark_reachable_objects(struct rev_info *revs, int mark_reflog, cp.progress = progress; cp.count = 0; - bitmap_git = prepare_bitmap_walk(revs, NULL, 0); + bitmap_git = prepare_bitmap_walk(revs, 0); if (bitmap_git) { traverse_bitmap_commit_list(bitmap_git, revs, mark_object_seen); free_bitmap_index(bitmap_git); diff --git a/read-cache.c b/read-cache.c index 79b9b99ebf..3e0e7d4183 100644 --- a/read-cache.c +++ b/read-cache.c @@ -736,7 +736,7 @@ static struct cache_entry *create_alias_ce(struct index_state *istate, void set_object_name_for_intent_to_add_entry(struct cache_entry *ce) { struct object_id oid; - if (write_object_file("", 0, blob_type, &oid)) + if (write_object_file("", 0, OBJ_BLOB, &oid)) die(_("cannot create an empty blob in the object database")); oidcpy(&ce->oid, &oid); } @@ -2842,7 +2842,7 @@ static int record_ieot(void) * rely on it. */ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, - int strip_extensions) + int strip_extensions, unsigned flags) { uint64_t start = getnanotime(); struct hashfile *f; @@ -2856,6 +2856,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, struct strbuf previous_name_buf = STRBUF_INIT, *previous_name; int drop_cache_tree = istate->drop_cache_tree; off_t offset; + int csum_fsync_flag; int ieot_entries = 1; struct index_entry_offset_table *ieot = NULL; int nr, nr_threads; @@ -3089,7 +3090,13 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, return -1; } - finalize_hashfile(f, istate->oid.hash, CSUM_HASH_IN_STREAM); + csum_fsync_flag = 0; + if (!alternate_index_output && (flags & COMMIT_LOCK)) + csum_fsync_flag = CSUM_FSYNC; + + finalize_hashfile(f, istate->oid.hash, FSYNC_COMPONENT_INDEX, + CSUM_HASH_IN_STREAM | csum_fsync_flag); + if (close_tempfile_gently(tempfile)) { error(_("could not close '%s'"), get_tempfile_path(tempfile)); return -1; @@ -3144,7 +3151,7 @@ static int do_write_locked_index(struct index_state *istate, struct lock_file *l */ trace2_region_enter_printf("index", "do_write_index", the_repository, "%s", get_lock_file_path(lock)); - ret = do_write_index(istate, lock->tempfile, 0); + ret = do_write_index(istate, lock->tempfile, 0, flags); trace2_region_leave_printf("index", "do_write_index", the_repository, "%s", get_lock_file_path(lock)); @@ -3238,7 +3245,7 @@ static int clean_shared_index_files(const char *current_hex) } static int write_shared_index(struct index_state *istate, - struct tempfile **temp) + struct tempfile **temp, unsigned flags) { struct split_index *si = istate->split_index; int ret, was_full = !istate->sparse_index; @@ -3248,7 +3255,7 @@ static int write_shared_index(struct index_state *istate, trace2_region_enter_printf("index", "shared/do_write_index", the_repository, "%s", get_tempfile_path(*temp)); - ret = do_write_index(si->base, *temp, 1); + ret = do_write_index(si->base, *temp, 1, flags); trace2_region_leave_printf("index", "shared/do_write_index", the_repository, "%s", get_tempfile_path(*temp)); @@ -3357,7 +3364,7 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock, ret = do_write_locked_index(istate, lock, flags); goto out; } - ret = write_shared_index(istate, &temp); + ret = write_shared_index(istate, &temp, flags); saved_errno = errno; if (is_tempfile_active(temp)) diff --git a/reflog.c b/reflog.c new file mode 100644 
index 0000000000..47ba8620c5 --- /dev/null +++ b/reflog.c @@ -0,0 +1,434 @@ +#include "cache.h" +#include "object-store.h" +#include "reflog.h" +#include "refs.h" +#include "revision.h" +#include "worktree.h" + +/* Remember to update object flag allocation in object.h */ +#define INCOMPLETE (1u<<10) +#define STUDYING (1u<<11) +#define REACHABLE (1u<<12) + +static int tree_is_complete(const struct object_id *oid) +{ + struct tree_desc desc; + struct name_entry entry; + int complete; + struct tree *tree; + + tree = lookup_tree(the_repository, oid); + if (!tree) + return 0; + if (tree->object.flags & SEEN) + return 1; + if (tree->object.flags & INCOMPLETE) + return 0; + + if (!tree->buffer) { + enum object_type type; + unsigned long size; + void *data = read_object_file(oid, &type, &size); + if (!data) { + tree->object.flags |= INCOMPLETE; + return 0; + } + tree->buffer = data; + tree->size = size; + } + init_tree_desc(&desc, tree->buffer, tree->size); + complete = 1; + while (tree_entry(&desc, &entry)) { + if (!has_object_file(&entry.oid) || + (S_ISDIR(entry.mode) && !tree_is_complete(&entry.oid))) { + tree->object.flags |= INCOMPLETE; + complete = 0; + } + } + free_tree_buffer(tree); + + if (complete) + tree->object.flags |= SEEN; + return complete; +} + +static int commit_is_complete(struct commit *commit) +{ + struct object_array study; + struct object_array found; + int is_incomplete = 0; + int i; + + /* early return */ + if (commit->object.flags & SEEN) + return 1; + if (commit->object.flags & INCOMPLETE) + return 0; + /* + * Find all commits that are reachable and are not marked as + * SEEN. Then make sure the trees and blobs contained are + * complete. After that, mark these commits also as SEEN. + * If some of the objects that are needed to complete this + * commit are missing, mark this commit as INCOMPLETE. + */ + memset(&study, 0, sizeof(study)); + memset(&found, 0, sizeof(found)); + add_object_array(&commit->object, NULL, &study); + add_object_array(&commit->object, NULL, &found); + commit->object.flags |= STUDYING; + while (study.nr) { + struct commit *c; + struct commit_list *parent; + + c = (struct commit *)object_array_pop(&study); + if (!c->object.parsed && !parse_object(the_repository, &c->object.oid)) + c->object.flags |= INCOMPLETE; + + if (c->object.flags & INCOMPLETE) { + is_incomplete = 1; + break; + } + else if (c->object.flags & SEEN) + continue; + for (parent = c->parents; parent; parent = parent->next) { + struct commit *p = parent->item; + if (p->object.flags & STUDYING) + continue; + p->object.flags |= STUDYING; + add_object_array(&p->object, NULL, &study); + add_object_array(&p->object, NULL, &found); + } + } + if (!is_incomplete) { + /* + * make sure all commits in "found" array have all the + * necessary objects. 
+ */ + for (i = 0; i < found.nr; i++) { + struct commit *c = + (struct commit *)found.objects[i].item; + if (!tree_is_complete(get_commit_tree_oid(c))) { + is_incomplete = 1; + c->object.flags |= INCOMPLETE; + } + } + if (!is_incomplete) { + /* mark all found commits as complete, iow SEEN */ + for (i = 0; i < found.nr; i++) + found.objects[i].item->flags |= SEEN; + } + } + /* clear flags from the objects we traversed */ + for (i = 0; i < found.nr; i++) + found.objects[i].item->flags &= ~STUDYING; + if (is_incomplete) + commit->object.flags |= INCOMPLETE; + else { + /* + * If we come here, we have (1) traversed the ancestry chain + * from the "commit" until we reach SEEN commits (which are + * known to be complete), and (2) made sure that the commits + * encountered during the above traversal refer to trees that + * are complete. Which means that we know *all* the commits + * we have seen during this process are complete. + */ + for (i = 0; i < found.nr; i++) + found.objects[i].item->flags |= SEEN; + } + /* free object arrays */ + object_array_clear(&study); + object_array_clear(&found); + return !is_incomplete; +} + +static int keep_entry(struct commit **it, struct object_id *oid) +{ + struct commit *commit; + + if (is_null_oid(oid)) + return 1; + commit = lookup_commit_reference_gently(the_repository, oid, 1); + if (!commit) + return 0; + + /* + * Make sure everything in this commit exists. + * + * We have walked all the objects reachable from the refs + * and cache earlier. The commits reachable by this commit + * must meet SEEN commits -- and then we should mark them as + * SEEN as well. + */ + if (!commit_is_complete(commit)) + return 0; + *it = commit; + return 1; +} + +/* + * Starting from commits in the cb->mark_list, mark commits that are + * reachable from them. Stop the traversal at commits older than + * the expire_limit and queue them back, so that the caller can call + * us again to restart the traversal with longer expire_limit. + */ +static void mark_reachable(struct expire_reflog_policy_cb *cb) +{ + struct commit_list *pending; + timestamp_t expire_limit = cb->mark_limit; + struct commit_list *leftover = NULL; + + for (pending = cb->mark_list; pending; pending = pending->next) + pending->item->object.flags &= ~REACHABLE; + + pending = cb->mark_list; + while (pending) { + struct commit_list *parent; + struct commit *commit = pop_commit(&pending); + if (commit->object.flags & REACHABLE) + continue; + if (parse_commit(commit)) + continue; + commit->object.flags |= REACHABLE; + if (commit->date < expire_limit) { + commit_list_insert(commit, &leftover); + continue; + } + commit->object.flags |= REACHABLE; + parent = commit->parents; + while (parent) { + commit = parent->item; + parent = parent->next; + if (commit->object.flags & REACHABLE) + continue; + commit_list_insert(commit, &pending); + } + } + cb->mark_list = leftover; +} + +static int unreachable(struct expire_reflog_policy_cb *cb, struct commit *commit, struct object_id *oid) +{ + /* + * We may or may not have the commit yet - if not, look it + * up using the supplied sha1. + */ + if (!commit) { + if (is_null_oid(oid)) + return 0; + + commit = lookup_commit_reference_gently(the_repository, oid, + 1); + + /* Not a commit -- keep it */ + if (!commit) + return 0; + } + + /* Reachable from the current ref? Don't prune. 
*/ + if (commit->object.flags & REACHABLE) + return 0; + + if (cb->mark_list && cb->mark_limit) { + cb->mark_limit = 0; /* dig down to the root */ + mark_reachable(cb); + } + + return !(commit->object.flags & REACHABLE); +} + +/* + * Return true iff the specified reflog entry should be expired. + */ +int should_expire_reflog_ent(struct object_id *ooid, struct object_id *noid, + const char *email, timestamp_t timestamp, int tz, + const char *message, void *cb_data) +{ + struct expire_reflog_policy_cb *cb = cb_data; + struct commit *old_commit, *new_commit; + + if (timestamp < cb->cmd.expire_total) + return 1; + + old_commit = new_commit = NULL; + if (cb->cmd.stalefix && + (!keep_entry(&old_commit, ooid) || !keep_entry(&new_commit, noid))) + return 1; + + if (timestamp < cb->cmd.expire_unreachable) { + switch (cb->unreachable_expire_kind) { + case UE_ALWAYS: + return 1; + case UE_NORMAL: + case UE_HEAD: + if (unreachable(cb, old_commit, ooid) || unreachable(cb, new_commit, noid)) + return 1; + break; + } + } + + if (cb->cmd.recno && --(cb->cmd.recno) == 0) + return 1; + + return 0; +} + +int should_expire_reflog_ent_verbose(struct object_id *ooid, + struct object_id *noid, + const char *email, + timestamp_t timestamp, int tz, + const char *message, void *cb_data) +{ + struct expire_reflog_policy_cb *cb = cb_data; + int expire; + + expire = should_expire_reflog_ent(ooid, noid, email, timestamp, tz, + message, cb); + + if (!expire) + printf("keep %s", message); + else if (cb->dry_run) + printf("would prune %s", message); + else + printf("prune %s", message); + + return expire; +} + +static int push_tip_to_list(const char *refname, const struct object_id *oid, + int flags, void *cb_data) +{ + struct commit_list **list = cb_data; + struct commit *tip_commit; + if (flags & REF_ISSYMREF) + return 0; + tip_commit = lookup_commit_reference_gently(the_repository, oid, 1); + if (!tip_commit) + return 0; + commit_list_insert(tip_commit, list); + return 0; +} + +static int is_head(const char *refname) +{ + switch (ref_type(refname)) { + case REF_TYPE_OTHER_PSEUDOREF: + case REF_TYPE_MAIN_PSEUDOREF: + if (parse_worktree_ref(refname, NULL, NULL, &refname)) + BUG("not a worktree ref: %s", refname); + break; + default: + break; + } + return !strcmp(refname, "HEAD"); +} + +void reflog_expiry_prepare(const char *refname, + const struct object_id *oid, + void *cb_data) +{ + struct expire_reflog_policy_cb *cb = cb_data; + struct commit_list *elem; + struct commit *commit = NULL; + + if (!cb->cmd.expire_unreachable || is_head(refname)) { + cb->unreachable_expire_kind = UE_HEAD; + } else { + commit = lookup_commit(the_repository, oid); + if (commit && is_null_oid(&commit->object.oid)) + commit = NULL; + cb->unreachable_expire_kind = commit ? 
UE_NORMAL : UE_ALWAYS; + } + + if (cb->cmd.expire_unreachable <= cb->cmd.expire_total) + cb->unreachable_expire_kind = UE_ALWAYS; + + switch (cb->unreachable_expire_kind) { + case UE_ALWAYS: + return; + case UE_HEAD: + for_each_ref(push_tip_to_list, &cb->tips); + for (elem = cb->tips; elem; elem = elem->next) + commit_list_insert(elem->item, &cb->mark_list); + break; + case UE_NORMAL: + commit_list_insert(commit, &cb->mark_list); + /* For reflog_expiry_cleanup() below */ + cb->tip_commit = commit; + } + cb->mark_limit = cb->cmd.expire_total; + mark_reachable(cb); +} + +void reflog_expiry_cleanup(void *cb_data) +{ + struct expire_reflog_policy_cb *cb = cb_data; + struct commit_list *elem; + + switch (cb->unreachable_expire_kind) { + case UE_ALWAYS: + return; + case UE_HEAD: + for (elem = cb->tips; elem; elem = elem->next) + clear_commit_marks(elem->item, REACHABLE); + free_commit_list(cb->tips); + break; + case UE_NORMAL: + clear_commit_marks(cb->tip_commit, REACHABLE); + break; + } +} + +int count_reflog_ent(struct object_id *ooid, struct object_id *noid, + const char *email, timestamp_t timestamp, int tz, + const char *message, void *cb_data) +{ + struct cmd_reflog_expire_cb *cb = cb_data; + if (!cb->expire_total || timestamp < cb->expire_total) + cb->recno++; + return 0; +} + +int reflog_delete(const char *rev, enum expire_reflog_flags flags, int verbose) +{ + struct cmd_reflog_expire_cb cmd = { 0 }; + int status = 0; + reflog_expiry_should_prune_fn *should_prune_fn = should_expire_reflog_ent; + const char *spec = strstr(rev, "@{"); + char *ep, *ref; + int recno; + struct expire_reflog_policy_cb cb = { + .dry_run = !!(flags & EXPIRE_REFLOGS_DRY_RUN), + }; + + if (verbose) + should_prune_fn = should_expire_reflog_ent_verbose; + + if (!spec) + return error(_("not a reflog: %s"), rev); + + if (!dwim_log(rev, spec - rev, NULL, &ref)) { + status |= error(_("no reflog for '%s'"), rev); + goto cleanup; + } + + recno = strtoul(spec + 2, &ep, 10); + if (*ep == '}') { + cmd.recno = -recno; + for_each_reflog_ent(ref, count_reflog_ent, &cmd); + } else { + cmd.expire_total = approxidate(spec + 2); + for_each_reflog_ent(ref, count_reflog_ent, &cmd); + cmd.expire_total = 0; + } + + cb.cmd = cmd; + status |= reflog_expire(ref, flags, + reflog_expiry_prepare, + should_prune_fn, + reflog_expiry_cleanup, + &cb); + + cleanup: + free(ref); + return status; +} diff --git a/reflog.h b/reflog.h new file mode 100644 index 0000000000..d2906fb9f8 --- /dev/null +++ b/reflog.h @@ -0,0 +1,43 @@ +#ifndef REFLOG_H +#define REFLOG_H +#include "refs.h" + +struct cmd_reflog_expire_cb { + int stalefix; + int explicit_expiry; + timestamp_t expire_total; + timestamp_t expire_unreachable; + int recno; +}; + +struct expire_reflog_policy_cb { + enum { + UE_NORMAL, + UE_ALWAYS, + UE_HEAD + } unreachable_expire_kind; + struct commit_list *mark_list; + unsigned long mark_limit; + struct cmd_reflog_expire_cb cmd; + struct commit *tip_commit; + struct commit_list *tips; + unsigned int dry_run:1; +}; + +int reflog_delete(const char *rev, enum expire_reflog_flags flags, + int verbose); +void reflog_expiry_cleanup(void *cb_data); +void reflog_expiry_prepare(const char *refname, const struct object_id *oid, + void *cb_data); +int should_expire_reflog_ent(struct object_id *ooid, struct object_id *noid, + const char *email, timestamp_t timestamp, int tz, + const char *message, void *cb_data); +int count_reflog_ent(struct object_id *ooid, struct object_id *noid, + const char *email, timestamp_t timestamp, int tz, + const char *message, 
void *cb_data); +int should_expire_reflog_ent_verbose(struct object_id *ooid, + struct object_id *noid, + const char *email, + timestamp_t timestamp, int tz, + const char *message, void *cb_data); +#endif /* REFLOG_H */ @@ -1673,6 +1673,12 @@ int refs_read_raw_ref(struct ref_store *ref_store, const char *refname, type, failure_errno); } +int refs_read_symbolic_ref(struct ref_store *ref_store, const char *refname, + struct strbuf *referent) +{ + return ref_store->be->read_symbolic_ref(ref_store, refname, referent); +} + const char *refs_resolve_ref_unsafe(struct ref_store *refs, const char *refname, int resolve_flags, @@ -2418,6 +2424,22 @@ int initial_ref_transaction_commit(struct ref_transaction *transaction, return refs->be->initial_transaction_commit(refs, transaction, err); } +void ref_transaction_for_each_queued_update(struct ref_transaction *transaction, + ref_transaction_for_each_queued_update_fn cb, + void *cb_data) +{ + int i; + + for (i = 0; i < transaction->nr; i++) { + struct ref_update *update = transaction->updates[i]; + + cb(update->refname, + (update->flags & REF_HAVE_OLD) ? &update->old_oid : NULL, + (update->flags & REF_HAVE_NEW) ? &update->new_oid : NULL, + cb_data); + } +} + int refs_delete_refs(struct ref_store *refs, const char *logmsg, struct string_list *refnames, unsigned int flags) { @@ -82,6 +82,9 @@ int read_ref_full(const char *refname, int resolve_flags, struct object_id *oid, int *flags); int read_ref(const char *refname, struct object_id *oid); +int refs_read_symbolic_ref(struct ref_store *ref_store, const char *refname, + struct strbuf *referent); + /* * Return 0 if a reference named refname could be created without * conflicting with the name of an existing reference. Otherwise, @@ -777,6 +780,20 @@ int initial_ref_transaction_commit(struct ref_transaction *transaction, struct strbuf *err); /* + * Execute the given callback function for each of the reference updates which + * have been queued in the given transaction. `old_oid` and `new_oid` may be + * `NULL` pointers depending on whether the update has these object IDs set or + * not. + */ +typedef void ref_transaction_for_each_queued_update_fn(const char *refname, + const struct object_id *old_oid, + const struct object_id *new_oid, + void *cb_data); +void ref_transaction_for_each_queued_update(struct ref_transaction *transaction, + ref_transaction_for_each_queued_update_fn cb, + void *cb_data); + +/* * Free `*transaction` and all associated data. 
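(A sketch of how the queued-update iterator declared above might be used; the callback name, its output, and the transaction variable "t" are assumptions for illustration only:

	static void show_queued_update(const char *refname,
				       const struct object_id *old_oid,
				       const struct object_id *new_oid,
				       void *cb_data)
	{
		/* either oid may be NULL when the update does not carry it */
		fprintf(stderr, "queued: %s: %s -> %s\n", refname,
			old_oid ? oid_to_hex(old_oid) : "(none)",
			new_oid ? oid_to_hex(new_oid) : "(none)");
	}

	/* with a transaction "t" that has updates queued but not yet committed: */
	ref_transaction_for_each_queued_update(t, show_queued_update, NULL);
)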
*/ void ref_transaction_free(struct ref_transaction *transaction); diff --git a/refs/debug.c b/refs/debug.c index 2b0771ca53..eed8bc94b0 100644 --- a/refs/debug.c +++ b/refs/debug.c @@ -220,8 +220,9 @@ static int debug_ref_iterator_abort(struct ref_iterator *ref_iterator) } static struct ref_iterator_vtable debug_ref_iterator_vtable = { - debug_ref_iterator_advance, debug_ref_iterator_peel, - debug_ref_iterator_abort + .advance = debug_ref_iterator_advance, + .peel = debug_ref_iterator_peel, + .abort = debug_ref_iterator_abort, }; static struct ref_iterator * @@ -261,6 +262,24 @@ static int debug_read_raw_ref(struct ref_store *ref_store, const char *refname, return res; } +static int debug_read_symbolic_ref(struct ref_store *ref_store, const char *refname, + struct strbuf *referent) +{ + struct debug_ref_store *drefs = (struct debug_ref_store *)ref_store; + struct ref_store *refs = drefs->refs; + int res; + + res = refs->be->read_symbolic_ref(refs, refname, referent); + if (!res) + trace_printf_key(&trace_refs, "read_symbolic_ref: %s: (%s)\n", + refname, referent->buf); + else + trace_printf_key(&trace_refs, + "read_symbolic_ref: %s: %d\n", refname, res); + return res; + +} + static struct ref_iterator * debug_reflog_iterator_begin(struct ref_store *ref_store) { @@ -418,29 +437,37 @@ static int debug_reflog_expire(struct ref_store *ref_store, const char *refname, } struct ref_storage_be refs_be_debug = { - NULL, - "debug", - NULL, - debug_init_db, - debug_transaction_prepare, - debug_transaction_finish, - debug_transaction_abort, - debug_initial_transaction_commit, - - debug_pack_refs, - debug_create_symref, - debug_delete_refs, - debug_rename_ref, - debug_copy_ref, - - debug_ref_iterator_begin, - debug_read_raw_ref, - - debug_reflog_iterator_begin, - debug_for_each_reflog_ent, - debug_for_each_reflog_ent_reverse, - debug_reflog_exists, - debug_create_reflog, - debug_delete_reflog, - debug_reflog_expire, + .next = NULL, + .name = "debug", + .init = NULL, + .init_db = debug_init_db, + + /* + * None of these should be NULL. If the "files" backend (in + * "struct ref_storage_be refs_be_files" in files-backend.c) + * has a function we should also have a wrapper for it here. + * Test the output with "GIT_TRACE_REFS=1". 
+ */ + .transaction_prepare = debug_transaction_prepare, + .transaction_finish = debug_transaction_finish, + .transaction_abort = debug_transaction_abort, + .initial_transaction_commit = debug_initial_transaction_commit, + + .pack_refs = debug_pack_refs, + .create_symref = debug_create_symref, + .delete_refs = debug_delete_refs, + .rename_ref = debug_rename_ref, + .copy_ref = debug_copy_ref, + + .iterator_begin = debug_ref_iterator_begin, + .read_raw_ref = debug_read_raw_ref, + .read_symbolic_ref = debug_read_symbolic_ref, + + .reflog_iterator_begin = debug_reflog_iterator_begin, + .for_each_reflog_ent = debug_for_each_reflog_ent, + .for_each_reflog_ent_reverse = debug_for_each_reflog_ent_reverse, + .reflog_exists = debug_reflog_exists, + .create_reflog = debug_create_reflog, + .delete_reflog = debug_delete_reflog, + .reflog_expire = debug_reflog_expire, }; diff --git a/refs/files-backend.c b/refs/files-backend.c index f59589d6cc..95acab78ee 100644 --- a/refs/files-backend.c +++ b/refs/files-backend.c @@ -338,9 +338,9 @@ static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs) return refs->loose; } -static int files_read_raw_ref(struct ref_store *ref_store, const char *refname, - struct object_id *oid, struct strbuf *referent, - unsigned int *type, int *failure_errno) +static int read_ref_internal(struct ref_store *ref_store, const char *refname, + struct object_id *oid, struct strbuf *referent, + unsigned int *type, int *failure_errno, int skip_packed_refs) { struct files_ref_store *refs = files_downcast(ref_store, REF_STORE_READ, "read_raw_ref"); @@ -381,7 +381,7 @@ stat_ref: if (lstat(path, &st) < 0) { int ignore_errno; myerr = errno; - if (myerr != ENOENT) + if (myerr != ENOENT || skip_packed_refs) goto out; if (refs_read_raw_ref(refs->packed_ref_store, refname, oid, referent, type, &ignore_errno)) { @@ -425,7 +425,8 @@ stat_ref: * ref is supposed to be, there could still be a * packed ref: */ - if (refs_read_raw_ref(refs->packed_ref_store, refname, oid, + if (skip_packed_refs || + refs_read_raw_ref(refs->packed_ref_store, refname, oid, referent, type, &ignore_errno)) { myerr = EISDIR; goto out; @@ -470,6 +471,27 @@ out: return ret; } +static int files_read_raw_ref(struct ref_store *ref_store, const char *refname, + struct object_id *oid, struct strbuf *referent, + unsigned int *type, int *failure_errno) +{ + return read_ref_internal(ref_store, refname, oid, referent, type, failure_errno, 0); +} + +static int files_read_symbolic_ref(struct ref_store *ref_store, const char *refname, + struct strbuf *referent) +{ + struct object_id oid; + int failure_errno, ret; + unsigned int type; + + ret = read_ref_internal(ref_store, refname, &oid, referent, &type, &failure_errno, 1); + if (ret) + return ret; + + return !(type & REF_ISSYMREF); +} + int parse_loose_ref_contents(const char *buf, struct object_id *oid, struct strbuf *referent, unsigned int *type, int *failure_errno) @@ -800,9 +822,9 @@ static int files_ref_iterator_abort(struct ref_iterator *ref_iterator) } static struct ref_iterator_vtable files_ref_iterator_vtable = { - files_ref_iterator_advance, - files_ref_iterator_peel, - files_ref_iterator_abort + .advance = files_ref_iterator_advance, + .peel = files_ref_iterator_peel, + .abort = files_ref_iterator_abort, }; static struct ref_iterator *files_ref_iterator_begin( @@ -1787,6 +1809,7 @@ static int write_ref_to_lockfile(struct ref_lock *lock, fd = get_lock_file_fd(&lock->lk); if (write_in_full(fd, oid_to_hex(oid), the_hash_algo->hexsz) < 0 || write_in_full(fd, 
&term, 1) < 0 || + fsync_component(FSYNC_COMPONENT_REFERENCE, get_lock_file_fd(&lock->lk)) < 0 || close_ref_gently(lock) < 0) { strbuf_addf(err, "couldn't write '%s'", get_lock_file_path(&lock->lk)); @@ -2209,9 +2232,9 @@ static int files_reflog_iterator_abort(struct ref_iterator *ref_iterator) } static struct ref_iterator_vtable files_reflog_iterator_vtable = { - files_reflog_iterator_advance, - files_reflog_iterator_peel, - files_reflog_iterator_abort + .advance = files_reflog_iterator_advance, + .peel = files_reflog_iterator_peel, + .abort = files_reflog_iterator_abort, }; static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store, @@ -3269,29 +3292,30 @@ static int files_init_db(struct ref_store *ref_store, struct strbuf *err) } struct ref_storage_be refs_be_files = { - NULL, - "files", - files_ref_store_create, - files_init_db, - files_transaction_prepare, - files_transaction_finish, - files_transaction_abort, - files_initial_transaction_commit, - - files_pack_refs, - files_create_symref, - files_delete_refs, - files_rename_ref, - files_copy_ref, - - files_ref_iterator_begin, - files_read_raw_ref, - - files_reflog_iterator_begin, - files_for_each_reflog_ent, - files_for_each_reflog_ent_reverse, - files_reflog_exists, - files_create_reflog, - files_delete_reflog, - files_reflog_expire + .next = NULL, + .name = "files", + .init = files_ref_store_create, + .init_db = files_init_db, + .transaction_prepare = files_transaction_prepare, + .transaction_finish = files_transaction_finish, + .transaction_abort = files_transaction_abort, + .initial_transaction_commit = files_initial_transaction_commit, + + .pack_refs = files_pack_refs, + .create_symref = files_create_symref, + .delete_refs = files_delete_refs, + .rename_ref = files_rename_ref, + .copy_ref = files_copy_ref, + + .iterator_begin = files_ref_iterator_begin, + .read_raw_ref = files_read_raw_ref, + .read_symbolic_ref = files_read_symbolic_ref, + + .reflog_iterator_begin = files_reflog_iterator_begin, + .for_each_reflog_ent = files_for_each_reflog_ent, + .for_each_reflog_ent_reverse = files_for_each_reflog_ent_reverse, + .reflog_exists = files_reflog_exists, + .create_reflog = files_create_reflog, + .delete_reflog = files_delete_reflog, + .reflog_expire = files_reflog_expire }; diff --git a/refs/iterator.c b/refs/iterator.c index a89d132d4f..b2e56bae1c 100644 --- a/refs/iterator.c +++ b/refs/iterator.c @@ -64,9 +64,9 @@ static int empty_ref_iterator_abort(struct ref_iterator *ref_iterator) } static struct ref_iterator_vtable empty_ref_iterator_vtable = { - empty_ref_iterator_advance, - empty_ref_iterator_peel, - empty_ref_iterator_abort + .advance = empty_ref_iterator_advance, + .peel = empty_ref_iterator_peel, + .abort = empty_ref_iterator_abort, }; struct ref_iterator *empty_ref_iterator_begin(void) @@ -201,9 +201,9 @@ static int merge_ref_iterator_abort(struct ref_iterator *ref_iterator) } static struct ref_iterator_vtable merge_ref_iterator_vtable = { - merge_ref_iterator_advance, - merge_ref_iterator_peel, - merge_ref_iterator_abort + .advance = merge_ref_iterator_advance, + .peel = merge_ref_iterator_peel, + .abort = merge_ref_iterator_abort, }; struct ref_iterator *merge_ref_iterator_begin( @@ -378,9 +378,9 @@ static int prefix_ref_iterator_abort(struct ref_iterator *ref_iterator) } static struct ref_iterator_vtable prefix_ref_iterator_vtable = { - prefix_ref_iterator_advance, - prefix_ref_iterator_peel, - prefix_ref_iterator_abort + .advance = prefix_ref_iterator_advance, + .peel = prefix_ref_iterator_peel, + 
.abort = prefix_ref_iterator_abort, }; struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0, diff --git a/refs/packed-backend.c b/refs/packed-backend.c index 27dd8c3922..66c4574c99 100644 --- a/refs/packed-backend.c +++ b/refs/packed-backend.c @@ -911,9 +911,9 @@ static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator) } static struct ref_iterator_vtable packed_ref_iterator_vtable = { - packed_ref_iterator_advance, - packed_ref_iterator_peel, - packed_ref_iterator_abort + .advance = packed_ref_iterator_advance, + .peel = packed_ref_iterator_peel, + .abort = packed_ref_iterator_abort }; static struct ref_iterator *packed_ref_iterator_begin( @@ -1262,7 +1262,8 @@ static int write_with_updates(struct packed_ref_store *refs, goto error; } - if (close_tempfile_gently(refs->tempfile)) { + if (fsync_component(FSYNC_COMPONENT_REFERENCE, get_tempfile_fd(refs->tempfile)) || + close_tempfile_gently(refs->tempfile)) { strbuf_addf(err, "error closing file %s: %s", get_tempfile_path(refs->tempfile), strerror(errno)); @@ -1591,105 +1592,36 @@ static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags) return 0; } -static int packed_create_symref(struct ref_store *ref_store, - const char *refname, const char *target, - const char *logmsg) -{ - BUG("packed reference store does not support symrefs"); -} - -static int packed_rename_ref(struct ref_store *ref_store, - const char *oldrefname, const char *newrefname, - const char *logmsg) -{ - BUG("packed reference store does not support renaming references"); -} - -static int packed_copy_ref(struct ref_store *ref_store, - const char *oldrefname, const char *newrefname, - const char *logmsg) -{ - BUG("packed reference store does not support copying references"); -} - static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store) { return empty_ref_iterator_begin(); } -static int packed_for_each_reflog_ent(struct ref_store *ref_store, - const char *refname, - each_reflog_ent_fn fn, void *cb_data) -{ - BUG("packed reference store does not support reflogs"); - return 0; -} - -static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store, - const char *refname, - each_reflog_ent_fn fn, - void *cb_data) -{ - BUG("packed reference store does not support reflogs"); - return 0; -} - -static int packed_reflog_exists(struct ref_store *ref_store, - const char *refname) -{ - BUG("packed reference store does not support reflogs"); - return 0; -} - -static int packed_create_reflog(struct ref_store *ref_store, - const char *refname, struct strbuf *err) -{ - BUG("packed reference store does not support reflogs"); -} - -static int packed_delete_reflog(struct ref_store *ref_store, - const char *refname) -{ - BUG("packed reference store does not support reflogs"); - return 0; -} - -static int packed_reflog_expire(struct ref_store *ref_store, - const char *refname, - unsigned int flags, - reflog_expiry_prepare_fn prepare_fn, - reflog_expiry_should_prune_fn should_prune_fn, - reflog_expiry_cleanup_fn cleanup_fn, - void *policy_cb_data) -{ - BUG("packed reference store does not support reflogs"); - return 0; -} - struct ref_storage_be refs_be_packed = { - NULL, - "packed", - packed_ref_store_create, - packed_init_db, - packed_transaction_prepare, - packed_transaction_finish, - packed_transaction_abort, - packed_initial_transaction_commit, - - packed_pack_refs, - packed_create_symref, - packed_delete_refs, - packed_rename_ref, - packed_copy_ref, - - packed_ref_iterator_begin, - packed_read_raw_ref, 
- - packed_reflog_iterator_begin, - packed_for_each_reflog_ent, - packed_for_each_reflog_ent_reverse, - packed_reflog_exists, - packed_create_reflog, - packed_delete_reflog, - packed_reflog_expire + .next = NULL, + .name = "packed", + .init = packed_ref_store_create, + .init_db = packed_init_db, + .transaction_prepare = packed_transaction_prepare, + .transaction_finish = packed_transaction_finish, + .transaction_abort = packed_transaction_abort, + .initial_transaction_commit = packed_initial_transaction_commit, + + .pack_refs = packed_pack_refs, + .create_symref = NULL, + .delete_refs = packed_delete_refs, + .rename_ref = NULL, + .copy_ref = NULL, + + .iterator_begin = packed_ref_iterator_begin, + .read_raw_ref = packed_read_raw_ref, + .read_symbolic_ref = NULL, + + .reflog_iterator_begin = packed_reflog_iterator_begin, + .for_each_reflog_ent = NULL, + .for_each_reflog_ent_reverse = NULL, + .reflog_exists = NULL, + .create_reflog = NULL, + .delete_reflog = NULL, + .reflog_expire = NULL, }; diff --git a/refs/ref-cache.c b/refs/ref-cache.c index be4aa5e098..3080ef944d 100644 --- a/refs/ref-cache.c +++ b/refs/ref-cache.c @@ -456,9 +456,9 @@ static int cache_ref_iterator_abort(struct ref_iterator *ref_iterator) } static struct ref_iterator_vtable cache_ref_iterator_vtable = { - cache_ref_iterator_advance, - cache_ref_iterator_peel, - cache_ref_iterator_abort + .advance = cache_ref_iterator_advance, + .peel = cache_ref_iterator_peel, + .abort = cache_ref_iterator_abort }; struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache, diff --git a/refs/refs-internal.h b/refs/refs-internal.h index 6e15db3ca4..001ef15835 100644 --- a/refs/refs-internal.h +++ b/refs/refs-internal.h @@ -649,6 +649,21 @@ typedef int read_raw_ref_fn(struct ref_store *ref_store, const char *refname, struct object_id *oid, struct strbuf *referent, unsigned int *type, int *failure_errno); +/* + * Read a symbolic reference from the specified reference store. This function + * is optional: if not implemented by a backend, then `read_raw_ref_fn` is used + * to read the symbolcic reference instead. It is intended to be implemented + * only in case the backend can optimize the reading of symbolic references. + * + * Return 0 on success, or -1 on failure. `referent` will be set to the target + * of the symbolic reference on success. This function explicitly does not + * distinguish between error cases and the reference not being a symbolic + * reference to allow backends to optimize this operation in case symbolic and + * non-symbolic references are treated differently. + */ +typedef int read_symbolic_ref_fn(struct ref_store *ref_store, const char *refname, + struct strbuf *referent); + struct ref_storage_be { struct ref_storage_be *next; const char *name; @@ -668,6 +683,7 @@ struct ref_storage_be { ref_iterator_begin_fn *iterator_begin; read_raw_ref_fn *read_raw_ref; + read_symbolic_ref_fn *read_symbolic_ref; reflog_iterator_begin_fn *reflog_iterator_begin; for_each_reflog_ent_fn *for_each_reflog_ent; diff --git a/reftable/block.c b/reftable/block.c index 2170748c5e..34d4d07369 100644 --- a/reftable/block.c +++ b/reftable/block.c @@ -88,8 +88,9 @@ uint8_t block_writer_type(struct block_writer *bw) return bw->buf[bw->header_off]; } -/* adds the reftable_record to the block. Returns -1 if it does not fit, 0 on - success */ +/* Adds the reftable_record to the block. Returns -1 if it does not fit, 0 on + success. Returns REFTABLE_API_ERROR if attempting to write a record with + empty key. 
*/ int block_writer_add(struct block_writer *w, struct reftable_record *rec) { struct strbuf empty = STRBUF_INIT; @@ -105,8 +106,14 @@ int block_writer_add(struct block_writer *w, struct reftable_record *rec) int is_restart = 0; struct strbuf key = STRBUF_INIT; int n = 0; + int err = -1; reftable_record_key(rec, &key); + if (!key.len) { + err = REFTABLE_API_ERROR; + goto done; + } + n = reftable_encode_key(&is_restart, out, last, key, reftable_record_val_type(rec)); if (n < 0) @@ -118,16 +125,11 @@ int block_writer_add(struct block_writer *w, struct reftable_record *rec) goto done; string_view_consume(&out, n); - if (block_writer_register_restart(w, start.len - out.len, is_restart, - &key) < 0) - goto done; - - strbuf_release(&key); - return 0; - + err = block_writer_register_restart(w, start.len - out.len, is_restart, + &key); done: strbuf_release(&key); - return -1; + return err; } int block_writer_finish(struct block_writer *w) @@ -332,6 +334,9 @@ int block_iter_next(struct block_iter *it, struct reftable_record *rec) if (n < 0) return -1; + if (!key.len) + return REFTABLE_FORMAT_ERROR; + string_view_consume(&in, n); n = reftable_record_decode(rec, key, extra, in, it->br->hash_size); if (n < 0) @@ -358,6 +363,8 @@ int block_reader_first_key(struct block_reader *br, struct strbuf *key) int n = reftable_decode_key(key, &extra, empty, in); if (n < 0) return n; + if (!key->len) + return REFTABLE_FORMAT_ERROR; return 0; } diff --git a/reftable/block_test.c b/reftable/block_test.c index fa2ee092ec..cb88af4a56 100644 --- a/reftable/block_test.c +++ b/reftable/block_test.c @@ -42,6 +42,11 @@ static void test_block_read_write(void) block_writer_init(&bw, BLOCK_TYPE_REF, block.data, block_size, header_off, hash_size(GIT_SHA1_FORMAT_ID)); + rec.u.ref.refname = ""; + rec.u.ref.value_type = REFTABLE_REF_DELETION; + n = block_writer_add(&bw, &rec); + EXPECT(n == REFTABLE_API_ERROR); + for (i = 0; i < N; i++) { char name[100]; uint8_t hash[GIT_SHA1_RAWSZ]; diff --git a/reftable/reader.c b/reftable/reader.c index 00906e7a2d..54b4025105 100644 --- a/reftable/reader.c +++ b/reftable/reader.c @@ -155,6 +155,11 @@ static int parse_footer(struct reftable_reader *r, uint8_t *footer, r->log_offsets.is_present = (first_block_typ == BLOCK_TYPE_LOG || r->log_offsets.offset > 0); r->obj_offsets.is_present = r->obj_offsets.offset > 0; + if (r->obj_offsets.is_present && !r->object_id_len) { + err = REFTABLE_FORMAT_ERROR; + goto done; + } + err = 0; done: return err; diff --git a/reftable/readwrite_test.c b/reftable/readwrite_test.c index 605ba0f9fd..469ab79a5a 100644 --- a/reftable/readwrite_test.c +++ b/reftable/readwrite_test.c @@ -100,7 +100,7 @@ static void write_table(char ***names, struct strbuf *buf, int N, n = reftable_writer_close(w); EXPECT(n == 0); - stats = writer_stats(w); + stats = reftable_writer_stats(w); for (i = 0; i < stats->ref_stats.blocks; i++) { int off = i * opts.block_size; if (off == 0) { @@ -239,7 +239,7 @@ static void test_log_write_read(void) n = reftable_writer_close(w); EXPECT(n == 0); - stats = writer_stats(w); + stats = reftable_writer_stats(w); EXPECT(stats->log_stats.blocks > 0); reftable_writer_free(w); w = NULL; @@ -330,7 +330,7 @@ static void test_log_zlib_corruption(void) n = reftable_writer_close(w); EXPECT(n == 0); - stats = writer_stats(w); + stats = reftable_writer_stats(w); EXPECT(stats->log_stats.blocks > 0); reftable_writer_free(w); w = NULL; @@ -667,6 +667,102 @@ static void test_write_empty_table(void) strbuf_release(&buf); } +static void 
test_write_object_id_min_length(void) +{ + struct reftable_write_options opts = { + .block_size = 75, + }; + struct strbuf buf = STRBUF_INIT; + struct reftable_writer *w = + reftable_new_writer(&strbuf_add_void, &buf, &opts); + uint8_t hash[GIT_SHA1_RAWSZ] = {42}; + struct reftable_ref_record ref = { + .update_index = 1, + .value_type = REFTABLE_REF_VAL1, + .value.val1 = hash, + }; + int err; + int i; + + reftable_writer_set_limits(w, 1, 1); + + /* Write the same hash in many refs. If there is only 1 hash, the + * disambiguating prefix is length 0 */ + for (i = 0; i < 256; i++) { + char name[256]; + snprintf(name, sizeof(name), "ref%05d", i); + ref.refname = name; + err = reftable_writer_add_ref(w, &ref); + EXPECT_ERR(err); + } + + err = reftable_writer_close(w); + EXPECT_ERR(err); + EXPECT(reftable_writer_stats(w)->object_id_len == 2); + reftable_writer_free(w); + strbuf_release(&buf); +} + +static void test_write_object_id_length(void) +{ + struct reftable_write_options opts = { + .block_size = 75, + }; + struct strbuf buf = STRBUF_INIT; + struct reftable_writer *w = + reftable_new_writer(&strbuf_add_void, &buf, &opts); + uint8_t hash[GIT_SHA1_RAWSZ] = {42}; + struct reftable_ref_record ref = { + .update_index = 1, + .value_type = REFTABLE_REF_VAL1, + .value.val1 = hash, + }; + int err; + int i; + + reftable_writer_set_limits(w, 1, 1); + + /* Write the same hash in many refs. If there is only 1 hash, the + * disambiguating prefix is length 0 */ + for (i = 0; i < 256; i++) { + char name[256]; + snprintf(name, sizeof(name), "ref%05d", i); + ref.refname = name; + ref.value.val1[15] = i; + err = reftable_writer_add_ref(w, &ref); + EXPECT_ERR(err); + } + + err = reftable_writer_close(w); + EXPECT_ERR(err); + EXPECT(reftable_writer_stats(w)->object_id_len == 16); + reftable_writer_free(w); + strbuf_release(&buf); +} + +static void test_write_empty_key(void) +{ + struct reftable_write_options opts = { 0 }; + struct strbuf buf = STRBUF_INIT; + struct reftable_writer *w = + reftable_new_writer(&strbuf_add_void, &buf, &opts); + struct reftable_ref_record ref = { + .refname = "", + .update_index = 1, + .value_type = REFTABLE_REF_DELETION, + }; + int err; + + reftable_writer_set_limits(w, 1, 1); + err = reftable_writer_add_ref(w, &ref); + EXPECT(err == REFTABLE_API_ERROR); + + err = reftable_writer_close(w); + EXPECT(err == REFTABLE_EMPTY_TABLE_ERROR); + reftable_writer_free(w); + strbuf_release(&buf); +} + static void test_write_key_order(void) { struct reftable_write_options opts = { 0 }; @@ -746,7 +842,10 @@ int readwrite_test_main(int argc, const char *argv[]) RUN_TEST(test_table_read_write_seek_index); RUN_TEST(test_table_refs_for_no_index); RUN_TEST(test_table_refs_for_obj_index); + RUN_TEST(test_write_empty_key); RUN_TEST(test_write_empty_table); RUN_TEST(test_log_overflow); + RUN_TEST(test_write_object_id_length); + RUN_TEST(test_write_object_id_min_length); return 0; } diff --git a/reftable/reftable-writer.h b/reftable/reftable-writer.h index a560dc1725..db8de197f6 100644 --- a/reftable/reftable-writer.h +++ b/reftable/reftable-writer.h @@ -143,7 +143,7 @@ int reftable_writer_close(struct reftable_writer *w); This struct becomes invalid when the writer is freed. 
*/ -const struct reftable_stats *writer_stats(struct reftable_writer *w); +const struct reftable_stats *reftable_writer_stats(struct reftable_writer *w); /* reftable_writer_free deallocates memory for the writer */ void reftable_writer_free(struct reftable_writer *w); diff --git a/reftable/writer.c b/reftable/writer.c index 944c2329ab..6d979e245f 100644 --- a/reftable/writer.c +++ b/reftable/writer.c @@ -240,14 +240,13 @@ static int writer_add_record(struct reftable_writer *w, writer_reinit_block_writer(w, reftable_record_type(rec)); err = block_writer_add(w->block_writer, rec); - if (err < 0) { + if (err == -1) { /* we are writing into memory, so an error can only mean it * doesn't fit. */ err = REFTABLE_ENTRY_TOO_BIG_ERROR; goto done; } - err = 0; done: strbuf_release(&key); return err; @@ -516,7 +515,9 @@ static void object_record_free(void *void_arg, void *key) static int writer_dump_object_index(struct reftable_writer *w) { struct write_record_arg closure = { .w = w }; - struct common_prefix_arg common = { NULL }; + struct common_prefix_arg common = { + .max = 1, /* obj_id_len should be >= 2. */ + }; if (w->obj_index_tree) { infix_walk(w->obj_index_tree, &update_common, &common); } @@ -694,7 +695,7 @@ static int writer_flush_block(struct reftable_writer *w) return writer_flush_nonempty_block(w); } -const struct reftable_stats *writer_stats(struct reftable_writer *w) +const struct reftable_stats *reftable_writer_stats(struct reftable_writer *w) { return &w->stats; } diff --git a/remote-curl.c b/remote-curl.c index 0dabef2dd7..ff44f41011 100644 --- a/remote-curl.c +++ b/remote-curl.c @@ -1472,11 +1472,12 @@ int cmd_main(int argc, const char **argv) { struct strbuf buf = STRBUF_INIT; int nongit; + int ret = 1; setup_git_directory_gently(&nongit); if (argc < 2) { error(_("remote-curl: usage: git remote-curl <remote> [<url>]")); - return 1; + goto cleanup; } options.verbosity = 1; @@ -1508,7 +1509,7 @@ int cmd_main(int argc, const char **argv) if (strbuf_getline_lf(&buf, stdin) == EOF) { if (ferror(stdin)) error(_("remote-curl: error reading command stream from git")); - return 1; + goto cleanup; } if (buf.len == 0) break; @@ -1556,12 +1557,15 @@ int cmd_main(int argc, const char **argv) break; } else { error(_("remote-curl: unknown command '%s' from git"), buf.buf); - return 1; + goto cleanup; } strbuf_reset(&buf); } while (1); http_cleanup(); + ret = 0; +cleanup: + strbuf_release(&buf); - return 0; + return ret; } @@ -1945,13 +1945,9 @@ const char *branch_get_push(struct branch *branch, struct strbuf *err) return branch->push_tracking_ref; } -static int ignore_symref_update(const char *refname) +static int ignore_symref_update(const char *refname, struct strbuf *scratch) { - int flag; - - if (!resolve_ref_unsafe(refname, 0, NULL, &flag)) - return 0; /* non-existing refs are OK */ - return (flag & REF_ISSYMREF); + return !refs_read_symbolic_ref(get_main_ref_store(the_repository), refname, scratch); } /* @@ -1964,6 +1960,7 @@ static int ignore_symref_update(const char *refname) static struct ref *get_expanded_map(const struct ref *remote_refs, const struct refspec_item *refspec) { + struct strbuf scratch = STRBUF_INIT; const struct ref *ref; struct ref *ret = NULL; struct ref **tail = &ret; @@ -1971,11 +1968,13 @@ static struct ref *get_expanded_map(const struct ref *remote_refs, for (ref = remote_refs; ref; ref = ref->next) { char *expn_name = NULL; + strbuf_reset(&scratch); + if (strchr(ref->name, '^')) continue; /* a dereference item */ if (match_name_with_pattern(refspec->src, 
ref->name, refspec->dst, &expn_name) && - !ignore_symref_update(expn_name)) { + !ignore_symref_update(expn_name, &scratch)) { struct ref *cpy = copy_ref(ref); cpy->peer_ref = alloc_ref(expn_name); @@ -1987,6 +1986,7 @@ static struct ref *get_expanded_map(const struct ref *remote_refs, free(expn_name); } + strbuf_release(&scratch); return ret; } diff --git a/repository.c b/repository.c index 34610c5a33..5d166b692c 100644 --- a/repository.c +++ b/repository.c @@ -240,6 +240,20 @@ out: return ret; } +static void repo_clear_path_cache(struct repo_path_cache *cache) +{ + FREE_AND_NULL(cache->squash_msg); + FREE_AND_NULL(cache->squash_msg); + FREE_AND_NULL(cache->merge_msg); + FREE_AND_NULL(cache->merge_rr); + FREE_AND_NULL(cache->merge_mode); + FREE_AND_NULL(cache->merge_head); + FREE_AND_NULL(cache->merge_autostash); + FREE_AND_NULL(cache->auto_merge); + FREE_AND_NULL(cache->fetch_head); + FREE_AND_NULL(cache->shallow); +} + void repo_clear(struct repository *repo) { FREE_AND_NULL(repo->gitdir); @@ -280,6 +294,8 @@ void repo_clear(struct repository *repo) remote_state_clear(repo->remote_state); FREE_AND_NULL(repo->remote_state); } + + repo_clear_path_cache(&repo->cached_paths); } int repo_read_index(struct repository *repo) @@ -301,6 +317,13 @@ int repo_read_index(struct repository *repo) if (repo->settings.command_requires_full_index) ensure_full_index(repo->index); + /* + * If sparse checkouts are in use, check whether paths with the + * SKIP_WORKTREE attribute are missing from the worktree; if not, + * clear that attribute for that path. + */ + clear_skip_worktree_from_present_files(repo->index); + return res; } diff --git a/repository.h b/repository.h index ca837cb9e9..e29f361703 100644 --- a/repository.h +++ b/repository.h @@ -44,6 +44,18 @@ struct repo_settings { int core_multi_pack_index; }; +struct repo_path_cache { + char *squash_msg; + char *merge_msg; + char *merge_rr; + char *merge_mode; + char *merge_head; + char *merge_autostash; + char *auto_merge; + char *fetch_head; + char *shallow; +}; + struct repository { /* Environment */ /* @@ -82,7 +94,7 @@ struct repository { /* * Contains path to often used file names. */ - struct path_cache cached_paths; + struct repo_path_cache cached_paths; /* * Path to the repository's graft file. 
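The remote.c hunk above rewrites ignore_symref_update() in terms of the new refs_read_symbolic_ref(). A minimal sketch of probing a symref the same way, with the ref name and the printf purely illustrative:

	struct strbuf target = STRBUF_INIT;
	struct ref_store *refs = get_main_ref_store(the_repository);

	/* returns 0 when the ref exists and is symbolic; "target" then holds the referent */
	if (!refs_read_symbolic_ref(refs, "HEAD", &target))
		printf("HEAD -> %s\n", target.buf);
	strbuf_release(&target);

Because the files backend's read_symbolic_ref reads only the loose ref (the skip_packed_refs flag above), such a probe no longer falls back to packed-refs, where a symbolic ref cannot live anyway.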
diff --git a/revision.c b/revision.c index 2bb913f2f3..2646b78990 100644 --- a/revision.c +++ b/revision.c @@ -32,6 +32,7 @@ #include "utf8.h" #include "bloom.h" #include "json-writer.h" +#include "list-objects-filter-options.h" volatile show_early_output_fn_t show_early_output; @@ -2690,6 +2691,10 @@ static int handle_revision_pseudo_opt(struct rev_info *revs, revs->no_walk = 0; } else if (!strcmp(arg, "--single-worktree")) { revs->single_worktree = 1; + } else if (skip_prefix(arg, ("--" CL_ARG__FILTER "="), &arg)) { + parse_list_objects_filter(&revs->filter, arg); + } else if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) { + list_objects_filter_set_no_filter(&revs->filter); } else { return 0; } @@ -2892,6 +2897,8 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s die("cannot combine --walk-reflogs with history-limiting options"); if (revs->rewrite_parents && revs->children.name) die(_("options '%s' and '%s' cannot be used together"), "--parents", "--children"); + if (revs->filter.choice && !revs->blob_objects) + die(_("object filtering requires --objects")); /* * Limitations on the graph functionality diff --git a/revision.h b/revision.h index b9c2421687..5bc59c7bfe 100644 --- a/revision.h +++ b/revision.h @@ -8,6 +8,7 @@ #include "pretty.h" #include "diff.h" #include "commit-slab-decl.h" +#include "list-objects-filter-options.h" /** * The revision walking API offers functions to build a list of revisions @@ -94,6 +95,12 @@ struct rev_info { /* The end-points specified by the end user */ struct rev_cmdline_info cmdline; + /* + * Object filter options. No filtering is specified + * if and only if filter.choice is zero. + */ + struct list_objects_filter_options filter; + /* excluding from --branches, --refs, etc. expansion */ struct string_list *ref_excludes; diff --git a/sequencer.c b/sequencer.c index 84eed9e96b..a1bb39383d 100644 --- a/sequencer.c +++ b/sequencer.c @@ -3749,7 +3749,7 @@ static int do_merge(struct repository *r, int run_commit_flags = 0; struct strbuf ref_name = STRBUF_INIT; struct commit *head_commit, *merge_commit, *i; - struct commit_list *bases, *j, *reversed = NULL; + struct commit_list *bases, *j; struct commit_list *to_merge = NULL, **tail = &to_merge; const char *strategy = !opts->xopts_nr && (!opts->strategy || @@ -3984,9 +3984,7 @@ static int do_merge(struct repository *r, git_path_merge_head(r), 0); write_message("no-ff", 5, git_path_merge_mode(r), 0); - for (j = bases; j; j = j->next) - commit_list_insert(j->item, &reversed); - free_commit_list(bases); + bases = reverse_commit_list(bases); repo_read_index(r); init_merge_options(&o, r); @@ -4002,10 +4000,10 @@ static int do_merge(struct repository *r, * update the index and working copy immediately. 
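The revision.c and revision.h hunks above teach setup_revisions() itself to parse --filter/--no-filter and to reject a filter when no object walk was requested; a sketch of the resulting behavior (commands are illustrative, not taken from the patch):

    # Filters are now handled by the generic pseudo-opt parser, e.g.:
    git rev-list --objects --filter=blob:none HEAD >omitted-blobs

    # ...while a filter without --objects is refused:
    test_must_fail git rev-list --filter=blob:none HEAD 2>err &&
    grep "object filtering requires --objects" err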
*/ ret = merge_ort_recursive(&o, - head_commit, merge_commit, reversed, + head_commit, merge_commit, bases, &i); } else { - ret = merge_recursive(&o, head_commit, merge_commit, reversed, + ret = merge_recursive(&o, head_commit, merge_commit, bases, &i); } if (ret <= 0) @@ -90,6 +90,7 @@ static void reset_repository_shallow(struct repository *r) { r->parsed_objects->is_shallow = -1; stat_validity_clear(r->parsed_objects->shallow_stat); + reset_commit_grafts(r); } int commit_shallow_file(struct repository *r, struct shallow_lock *lk) diff --git a/shared.mak b/shared.mak new file mode 100644 index 0000000000..4e1b62ee99 --- /dev/null +++ b/shared.mak @@ -0,0 +1,103 @@ +### Remove GNU make implicit rules + +## This speeds things up since we don't need to look for and stat() a +## "foo.c,v" every time a rule referring to "foo.c" is in play. See +## "make -p -f/dev/null | grep ^%::'". +%:: %,v +%:: RCS/%,v +%:: RCS/% +%:: s.% +%:: SCCS/s.% + +## Likewise delete default $(SUFFIXES). See: +## +## info make --index-search=.SUFFIXES +.SUFFIXES: + +### Flags affecting all rules + +# A GNU make extension since gmake 3.72 (released in late 1994) to +# remove the target of rules if commands in those rules fail. The +# default is to only do that if make itself receives a signal. Affects +# all targets, see: +# +# info make --index-search=.DELETE_ON_ERROR +.DELETE_ON_ERROR: + +### Global variables + +## comma, empty, space: handy variables as these tokens are either +## special or can be hard to spot among other Makefile syntax. +comma := , +empty := +space := $(empty) $(empty) + +### Quieting +## common +QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir +QUIET_SUBDIR1 = + +ifneq ($(findstring w,$(MAKEFLAGS)),w) +PRINT_DIR = --no-print-directory +else # "make -w" +NO_SUBDIR = : +endif + +ifneq ($(findstring s,$(MAKEFLAGS)),s) +ifndef V +## common + QUIET_SUBDIR0 = +@subdir= + QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \ + $(MAKE) $(PRINT_DIR) -C $$subdir + + QUIET = @ + QUIET_GEN = @echo ' ' GEN $@; + + QUIET_MKDIR_P_PARENT = @echo $(wspfx_SQ) MKDIR -p $(@D); + +## Used in "Makefile" + QUIET_CC = @echo ' ' CC $@; + QUIET_AR = @echo ' ' AR $@; + QUIET_LINK = @echo ' ' LINK $@; + QUIET_BUILT_IN = @echo ' ' BUILTIN $@; + QUIET_LNCP = @echo ' ' LN/CP $@; + QUIET_XGETTEXT = @echo ' ' XGETTEXT $@; + QUIET_MSGFMT = @echo ' ' MSGFMT $@; + QUIET_GCOV = @echo ' ' GCOV $@; + QUIET_SP = @echo ' ' SP $<; + QUIET_HDR = @echo ' ' HDR $(<:hcc=h); + QUIET_RC = @echo ' ' RC $@; + QUIET_SPATCH = @echo ' ' SPATCH $<; + +## Used in "Documentation/Makefile" + QUIET_ASCIIDOC = @echo ' ' ASCIIDOC $@; + QUIET_XMLTO = @echo ' ' XMLTO $@; + QUIET_DB2TEXI = @echo ' ' DB2TEXI $@; + QUIET_MAKEINFO = @echo ' ' MAKEINFO $@; + QUIET_DBLATEX = @echo ' ' DBLATEX $@; + QUIET_XSLTPROC = @echo ' ' XSLTPROC $@; + QUIET_GEN = @echo ' ' GEN $@; + QUIET_STDERR = 2> /dev/null + + QUIET_LINT_GITLINK = @echo ' ' LINT GITLINK $<; + QUIET_LINT_MANSEC = @echo ' ' LINT MAN SEC $<; + QUIET_LINT_MANEND = @echo ' ' LINT MAN END $<; + + export V +endif +endif + +### Templates + +## mkdir_p_parent: lazily "mkdir -p" the path needed for a $@ +## file. Uses $(wildcard) to avoid the "mkdir -p" if it's not +## needed. +## +## Is racy, but in a good way; we might redundantly (and safely) +## "mkdir -p" when running in parallel, but won't need to exhaustively create +## individual rules for "a" -> "prefix" -> "dir" -> "file" if given a +## "a/prefix/dir/file". This can instead be inserted at the start of +## the "a/prefix/dir/file" rule. 
+define mkdir_p_parent_template +$(if $(wildcard $(@D)),,$(QUIET_MKDIR_P_PARENT)$(shell mkdir -p $(@D))) +endef diff --git a/sparse-index.c b/sparse-index.c index fdbe97b976..8636af72de 100644 --- a/sparse-index.c +++ b/sparse-index.c @@ -337,6 +337,80 @@ void ensure_correct_sparsity(struct index_state *istate) ensure_full_index(istate); } +static int path_found(const char *path, const char **dirname, size_t *dir_len, + int *dir_found) +{ + struct stat st; + char *newdir; + char *tmp; + + /* + * If dirname corresponds to a directory that doesn't exist, and this + * path starts with dirname, then path can't exist. + */ + if (!*dir_found && !memcmp(path, *dirname, *dir_len)) + return 0; + + /* + * If path itself exists, return 1. + */ + if (!lstat(path, &st)) + return 1; + + /* + * Otherwise, path does not exist so we'll return 0...but we'll first + * determine some info about its parent directory so we can avoid + * lstat calls for future cache entries. + */ + newdir = strrchr(path, '/'); + if (!newdir) + return 0; /* Didn't find a parent dir; just return 0 now. */ + + /* + * If path starts with directory (which we already lstat'ed and found), + * then no need to lstat parent directory again. + */ + if (*dir_found && *dirname && memcmp(path, *dirname, *dir_len)) + return 0; + + /* Free previous dirname, and cache path's dirname */ + *dirname = path; + *dir_len = newdir - path + 1; + + tmp = xstrndup(path, *dir_len); + *dir_found = !lstat(tmp, &st); + free(tmp); + + return 0; +} + +void clear_skip_worktree_from_present_files(struct index_state *istate) +{ + const char *last_dirname = NULL; + size_t dir_len = 0; + int dir_found = 1; + + int i; + + if (!core_apply_sparse_checkout || + sparse_expect_files_outside_of_patterns) + return; + +restart: + for (i = 0; i < istate->cache_nr; i++) { + struct cache_entry *ce = istate->cache[i]; + + if (ce_skip_worktree(ce) && + path_found(ce->name, &last_dirname, &dir_len, &dir_found)) { + if (S_ISSPARSEDIR(ce->ce_mode)) { + ensure_full_index(istate); + goto restart; + } + ce->ce_flags &= ~CE_SKIP_WORKTREE; + } + } +} + /* * This static global helps avoid infinite recursion between * expand_to_path() and index_file_exists(). diff --git a/sparse-index.h b/sparse-index.h index 656bd835b2..633d4fb7e3 100644 --- a/sparse-index.h +++ b/sparse-index.h @@ -5,6 +5,7 @@ struct index_state; #define SPARSE_INDEX_MEMORY_ONLY (1 << 0) int convert_to_sparse(struct index_state *istate, int flags); void ensure_correct_sparsity(struct index_state *istate); +void clear_skip_worktree_from_present_files(struct index_state *istate); /* * Some places in the codebase expect to search for a specific path. @@ -875,9 +875,9 @@ static void strbuf_humanise(struct strbuf *buf, off_t bytes, strbuf_addf(buf, humanise_rate == 0 ? 
/* TRANSLATORS: IEC 80000-13:2008 byte */ - Q_("%u byte", "%u bytes", (unsigned)bytes) : + Q_("%u byte", "%u bytes", bytes) : /* TRANSLATORS: IEC 80000-13:2008 byte/second */ - Q_("%u byte/s", "%u bytes/s", (unsigned)bytes), + Q_("%u byte/s", "%u bytes/s", bytes), (unsigned)bytes); } } diff --git a/string-list.h b/string-list.h index 267d6e5769..d5a744e143 100644 --- a/string-list.h +++ b/string-list.h @@ -86,7 +86,8 @@ typedef int (*compare_strings_fn)(const char *, const char *); */ struct string_list { struct string_list_item *items; - unsigned int nr, alloc; + size_t nr; + size_t alloc; unsigned int strdup_strings:1; compare_strings_fn cmp; /* NULL uses strcmp() */ }; diff --git a/submodule.c b/submodule.c index 5ace18a7d9..86c8f0f89d 100644 --- a/submodule.c +++ b/submodule.c @@ -22,6 +22,7 @@ #include "parse-options.h" #include "object-store.h" #include "commit-reach.h" +#include "shallow.h" static int config_update_recurse_submodules = RECURSE_SUBMODULES_OFF; static int initialized_fetch_ref_tips; @@ -167,26 +168,6 @@ void stage_updated_gitmodules(struct index_state *istate) static struct string_list added_submodule_odb_paths = STRING_LIST_INIT_NODUP; -/* TODO: remove this function, use repo_submodule_init instead. */ -int add_submodule_odb(const char *path) -{ - struct strbuf objects_directory = STRBUF_INIT; - int ret = 0; - - ret = strbuf_git_path_submodule(&objects_directory, path, "objects/"); - if (ret) - goto done; - if (!is_directory(objects_directory.buf)) { - ret = -1; - goto done; - } - string_list_insert(&added_submodule_odb_paths, - strbuf_detach(&objects_directory, NULL)); -done: - strbuf_release(&objects_directory); - return ret; -} - void add_submodule_odb_by_path(const char *path) { string_list_insert(&added_submodule_odb_paths, xstrdup(path)); @@ -782,19 +763,6 @@ const struct submodule *submodule_from_ce(const struct cache_entry *ce) return submodule_from_path(the_repository, null_oid(), ce->name); } -static struct oid_array *submodule_commits(struct string_list *submodules, - const char *name) -{ - struct string_list_item *item; - - item = string_list_insert(submodules, name); - if (item->util) - return (struct oid_array *) item->util; - - /* NEEDSWORK: should we have oid_array_init()? */ - item->util = xcalloc(1, sizeof(struct oid_array)); - return (struct oid_array *) item->util; -} struct collect_changed_submodules_cb_data { struct repository *repo; @@ -819,6 +787,52 @@ static const char *default_name_or_path(const char *path_or_name) return path_or_name; } +/* + * Holds relevant information for a changed submodule. Used as the .util + * member of the changed submodule name string_list_item. + * + * (super_oid, path) allows the submodule config to be read from _some_ + * .gitmodules file. We store this information the first time we find a + * superproject commit that points to the submodule, but this is + * arbitrary - we can choose any (super_oid, path) that matches the + * submodule's name. + * + * NEEDSWORK: Storing an arbitrary commit is undesirable because we can't + * guarantee that we're reading the commit that the user would expect. A better + * scheme would be to just fetch a submodule by its name. This requires two + * steps: + * - Create a function that behaves like repo_submodule_init(), but accepts a + * submodule name instead of treeish_name and path. This should be easy + * because repo_submodule_init() internally uses the submodule's name. 
+ * + * - Replace most instances of 'struct submodule' (which is the .gitmodules + * config) with just the submodule name. This is OK because we expect + * submodule settings to be stored in .git/config (via "git submodule init"), + * not .gitmodules. This also lets us delete get_non_gitmodules_submodule(), + * which constructs a bogus 'struct submodule' for the sake of giving a + * placeholder name to a gitlink. + */ +struct changed_submodule_data { + /* + * The first superproject commit in the rev walk that points to + * the submodule. + */ + const struct object_id *super_oid; + /* + * Path to the submodule in the superproject commit referenced + * by 'super_oid'. + */ + char *path; + /* The submodule commits that have changed in the rev walk. */ + struct oid_array new_commits; +}; + +static void changed_submodule_data_clear(struct changed_submodule_data *cs_data) +{ + oid_array_clear(&cs_data->new_commits); + free(cs_data->path); +} + static void collect_changed_submodules_cb(struct diff_queue_struct *q, struct diff_options *options, void *data) @@ -830,9 +844,10 @@ static void collect_changed_submodules_cb(struct diff_queue_struct *q, for (i = 0; i < q->nr; i++) { struct diff_filepair *p = q->queue[i]; - struct oid_array *commits; const struct submodule *submodule; const char *name; + struct string_list_item *item; + struct changed_submodule_data *cs_data; if (!S_ISGITLINK(p->two->mode)) continue; @@ -859,8 +874,16 @@ static void collect_changed_submodules_cb(struct diff_queue_struct *q, if (!name) continue; - commits = submodule_commits(changed, name); - oid_array_append(commits, &p->two->oid); + item = string_list_insert(changed, name); + if (item->util) + cs_data = item->util; + else { + item->util = xcalloc(1, sizeof(struct changed_submodule_data)); + cs_data = item->util; + cs_data->super_oid = commit_oid; + cs_data->path = xstrdup(p->two->path); + } + oid_array_append(&cs_data->new_commits, &p->two->oid); } } @@ -907,11 +930,12 @@ static void collect_changed_submodules(struct repository *r, reset_revision_walk(); } -static void free_submodules_oids(struct string_list *submodules) +static void free_submodules_data(struct string_list *submodules) { struct string_list_item *item; for_each_string_list_item(item, submodules) - oid_array_clear((struct oid_array *) item->util); + changed_submodule_data_clear(item->util); + string_list_clear(submodules, 1); } @@ -932,6 +956,7 @@ struct has_commit_data { struct repository *repo; int result; const char *path; + const struct object_id *super_oid; }; static int check_has_commit(const struct object_id *oid, void *data) @@ -940,9 +965,10 @@ static int check_has_commit(const struct object_id *oid, void *data) struct repository subrepo; enum object_type type; - if (repo_submodule_init(&subrepo, cb->repo, cb->path, null_oid())) { + if (repo_submodule_init(&subrepo, cb->repo, cb->path, cb->super_oid)) { cb->result = 0; - goto cleanup; + /* subrepo failed to init, so don't clean it up. */ + return 0; } type = oid_object_info(&subrepo, oid, NULL); @@ -968,21 +994,15 @@ cleanup: static int submodule_has_commits(struct repository *r, const char *path, + const struct object_id *super_oid, struct oid_array *commits) { - struct has_commit_data has_commit = { r, 1, path }; - - /* - * Perform a cheap, but incorrect check for the existence of 'commits'. - * This is done by adding the submodule's object store to the in-core - * object store, and then querying for each commit's existence. 
If we - * do not have the commit object anywhere, there is no chance we have - * it in the object store of the correct submodule and have it - * reachable from a ref, so we can fail early without spawning rev-list - * which is expensive. - */ - if (add_submodule_odb(path)) - return 0; + struct has_commit_data has_commit = { + .repo = r, + .result = 1, + .path = path, + .super_oid = super_oid + }; oid_array_for_each_unique(commits, check_has_commit, &has_commit); @@ -1017,7 +1037,7 @@ static int submodule_needs_pushing(struct repository *r, const char *path, struct oid_array *commits) { - if (!submodule_has_commits(r, path, commits)) + if (!submodule_has_commits(r, path, null_oid(), commits)) /* * NOTE: We do consider it safe to return "no" here. The * correct answer would be "We do not know" instead of @@ -1077,7 +1097,7 @@ int find_unpushed_submodules(struct repository *r, collect_changed_submodules(r, &submodules, &argv); for_each_string_list_item(name, &submodules) { - struct oid_array *commits = name->util; + struct changed_submodule_data *cs_data = name->util; const struct submodule *submodule; const char *path = NULL; @@ -1090,11 +1110,11 @@ int find_unpushed_submodules(struct repository *r, if (!path) continue; - if (submodule_needs_pushing(r, path, commits)) + if (submodule_needs_pushing(r, path, &cs_data->new_commits)) string_list_insert(needs_pushing, path); } - free_submodules_oids(&submodules); + free_submodules_data(&submodules); strvec_clear(&argv); return needs_pushing->nr; @@ -1240,14 +1260,36 @@ void check_for_new_submodule_commits(struct object_id *oid) oid_array_append(&ref_tips_after_fetch, oid); } +/* + * Returns 1 if there is at least one submodule gitdir in + * $GIT_DIR/modules and 0 otherwise. This follows + * submodule_name_to_gitdir(), which looks for submodules in + * $GIT_DIR/modules, not $GIT_COMMON_DIR. + * + * A submodule can be moved to $GIT_DIR/modules manually by running "git + * submodule absorbgitdirs", or it may be initialized there by "git + * submodule update". 
+ */ +static int repo_has_absorbed_submodules(struct repository *r) +{ + int ret; + struct strbuf buf = STRBUF_INIT; + + strbuf_repo_git_path(&buf, r, "modules/"); + ret = file_exists(buf.buf) && !is_empty_dir(buf.buf); + strbuf_release(&buf); + return ret; +} + static void calculate_changed_submodule_paths(struct repository *r, struct string_list *changed_submodule_names) { struct strvec argv = STRVEC_INIT; struct string_list_item *name; - /* No need to check if there are no submodules configured */ - if (!submodule_from_path(r, NULL, NULL)) + /* No need to check if no submodules would be fetched */ + if (!submodule_from_path(r, NULL, NULL) && + !repo_has_absorbed_submodules(r)) return; strvec_push(&argv, "--"); /* argv[0] program name */ @@ -1264,7 +1306,7 @@ static void calculate_changed_submodule_paths(struct repository *r, collect_changed_submodules(r, changed_submodule_names, &argv); for_each_string_list_item(name, changed_submodule_names) { - struct oid_array *commits = name->util; + struct changed_submodule_data *cs_data = name->util; const struct submodule *submodule; const char *path = NULL; @@ -1277,8 +1319,8 @@ static void calculate_changed_submodule_paths(struct repository *r, if (!path) continue; - if (submodule_has_commits(r, path, commits)) { - oid_array_clear(commits); + if (submodule_has_commits(r, path, null_oid(), &cs_data->new_commits)) { + changed_submodule_data_clear(cs_data); *name->string = '\0'; } } @@ -1315,12 +1357,21 @@ int submodule_touches_in_range(struct repository *r, strvec_clear(&args); - free_submodules_oids(&subs); + free_submodules_data(&subs); return ret; } struct submodule_parallel_fetch { - int count; + /* + * The index of the last index entry processed by + * get_fetch_task_from_index(). + */ + int index_count; + /* + * The index of the last string_list entry processed by + * get_fetch_task_from_changed(). + */ + int changed_count; struct strvec args; struct repository *r; const char *prefix; @@ -1329,7 +1380,16 @@ struct submodule_parallel_fetch { int quiet; int result; + /* + * Names of submodules that have new commits. Generated by + * walking the newly fetched superproject commits. + */ struct string_list changed_submodule_names; + /* + * Names of submodules that have already been processed. Lets us + * avoid fetching the same submodule more than once. + */ + struct string_list seen_submodule_names; /* Pending fetches by OIDs */ struct fetch_task **oid_fetch_tasks; @@ -1340,6 +1400,7 @@ struct submodule_parallel_fetch { #define SPF_INIT { \ .args = STRVEC_INIT, \ .changed_submodule_names = STRING_LIST_INIT_DUP, \ + .seen_submodule_names = STRING_LIST_INIT_DUP, \ .submodules_with_errors = STRBUF_INIT, \ } @@ -1376,6 +1437,8 @@ struct fetch_task { struct repository *repo; const struct submodule *sub; unsigned free_sub : 1; /* Do we need to free the submodule? */ + const char *default_argv; /* The default fetch mode. */ + struct strvec git_args; /* Args for the child git process. */ struct oid_array *commits; /* Ensure these commits are fetched */ }; @@ -1401,31 +1464,6 @@ static const struct submodule *get_non_gitmodules_submodule(const char *path) return (const struct submodule *) ret; } -static struct fetch_task *fetch_task_create(struct repository *r, - const char *path) -{ - struct fetch_task *task = xmalloc(sizeof(*task)); - memset(task, 0, sizeof(*task)); - - task->sub = submodule_from_path(r, null_oid(), path); - if (!task->sub) { - /* - * No entry in .gitmodules? 
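repo_has_absorbed_submodules() above only considers gitdirs under $GIT_DIR/modules; for illustration, this is how a submodule's gitdir typically ends up there (the submodule name "sub" is hypothetical):

    # "git submodule update" initializes clones there directly; an existing
    # in-worktree .git directory can be moved there after the fact:
    git submodule absorbgitdirs sub &&
    cat sub/.git &&               # now a gitfile: "gitdir: ../.git/modules/sub"
    test -d .git/modules/sub      # the directory the new helper checks for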
Technically not a submodule, - * but historically we supported repositories that happen to be - * in-place where a gitlink is. Keep supporting them. - */ - task->sub = get_non_gitmodules_submodule(path); - if (!task->sub) { - free(task); - return NULL; - } - - task->free_sub = 1; - } - - return task; -} - static void fetch_task_release(struct fetch_task *p) { if (p->free_sub) @@ -1436,14 +1474,17 @@ static void fetch_task_release(struct fetch_task *p) if (p->repo) repo_clear(p->repo); FREE_AND_NULL(p->repo); + + strvec_clear(&p->git_args); } static struct repository *get_submodule_repo_for(struct repository *r, - const char *path) + const char *path, + const struct object_id *treeish_name) { struct repository *ret = xmalloc(sizeof(*ret)); - if (repo_submodule_init(ret, r, path, null_oid())) { + if (repo_submodule_init(ret, r, path, treeish_name)) { free(ret); return NULL; } @@ -1451,67 +1492,83 @@ static struct repository *get_submodule_repo_for(struct repository *r, return ret; } -static int get_next_submodule(struct child_process *cp, - struct strbuf *err, void *data, void **task_cb) +static struct fetch_task *fetch_task_create(struct submodule_parallel_fetch *spf, + const char *path, + const struct object_id *treeish_name) { - struct submodule_parallel_fetch *spf = data; + struct fetch_task *task = xmalloc(sizeof(*task)); + memset(task, 0, sizeof(*task)); + + task->sub = submodule_from_path(spf->r, treeish_name, path); - for (; spf->count < spf->r->index->cache_nr; spf->count++) { - const struct cache_entry *ce = spf->r->index->cache[spf->count]; - const char *default_argv; + if (!task->sub) { + /* + * No entry in .gitmodules? Technically not a submodule, + * but historically we supported repositories that happen to be + * in-place where a gitlink is. Keep supporting them. 
+ */ + task->sub = get_non_gitmodules_submodule(path); + if (!task->sub) + goto cleanup; + + task->free_sub = 1; + } + + if (string_list_lookup(&spf->seen_submodule_names, task->sub->name)) + goto cleanup; + + switch (get_fetch_recurse_config(task->sub, spf)) + { + default: + case RECURSE_SUBMODULES_DEFAULT: + case RECURSE_SUBMODULES_ON_DEMAND: + if (!task->sub || + !string_list_lookup( + &spf->changed_submodule_names, + task->sub->name)) + goto cleanup; + task->default_argv = "on-demand"; + break; + case RECURSE_SUBMODULES_ON: + task->default_argv = "yes"; + break; + case RECURSE_SUBMODULES_OFF: + goto cleanup; + } + + task->repo = get_submodule_repo_for(spf->r, path, treeish_name); + + return task; + + cleanup: + fetch_task_release(task); + free(task); + return NULL; +} + +static struct fetch_task * +get_fetch_task_from_index(struct submodule_parallel_fetch *spf, + struct strbuf *err) +{ + for (; spf->index_count < spf->r->index->cache_nr; spf->index_count++) { + const struct cache_entry *ce = + spf->r->index->cache[spf->index_count]; struct fetch_task *task; if (!S_ISGITLINK(ce->ce_mode)) continue; - task = fetch_task_create(spf->r, ce->name); + task = fetch_task_create(spf, ce->name, null_oid()); if (!task) continue; - switch (get_fetch_recurse_config(task->sub, spf)) - { - default: - case RECURSE_SUBMODULES_DEFAULT: - case RECURSE_SUBMODULES_ON_DEMAND: - if (!task->sub || - !string_list_lookup( - &spf->changed_submodule_names, - task->sub->name)) - continue; - default_argv = "on-demand"; - break; - case RECURSE_SUBMODULES_ON: - default_argv = "yes"; - break; - case RECURSE_SUBMODULES_OFF: - continue; - } - - task->repo = get_submodule_repo_for(spf->r, task->sub->path); if (task->repo) { - struct strbuf submodule_prefix = STRBUF_INIT; - child_process_init(cp); - cp->dir = task->repo->gitdir; - prepare_submodule_repo_env_in_gitdir(&cp->env_array); - cp->git_cmd = 1; if (!spf->quiet) strbuf_addf(err, _("Fetching submodule %s%s\n"), spf->prefix, ce->name); - strvec_init(&cp->args); - strvec_pushv(&cp->args, spf->args.v); - strvec_push(&cp->args, default_argv); - strvec_push(&cp->args, "--submodule-prefix"); - strbuf_addf(&submodule_prefix, "%s%s/", - spf->prefix, - task->sub->path); - strvec_push(&cp->args, submodule_prefix.buf); - - spf->count++; - *task_cb = task; - - strbuf_release(&submodule_prefix); - return 1; + spf->index_count++; + return task; } else { struct strbuf empty_submodule_path = STRBUF_INIT; @@ -1535,6 +1592,111 @@ static int get_next_submodule(struct child_process *cp, strbuf_release(&empty_submodule_path); } } + return NULL; +} + +static struct fetch_task * +get_fetch_task_from_changed(struct submodule_parallel_fetch *spf, + struct strbuf *err) +{ + for (; spf->changed_count < spf->changed_submodule_names.nr; + spf->changed_count++) { + struct string_list_item item = + spf->changed_submodule_names.items[spf->changed_count]; + struct changed_submodule_data *cs_data = item.util; + struct fetch_task *task; + + if (!is_tree_submodule_active(spf->r, cs_data->super_oid,cs_data->path)) + continue; + + task = fetch_task_create(spf, cs_data->path, + cs_data->super_oid); + if (!task) + continue; + + if (!task->repo) { + strbuf_addf(err, _("Could not access submodule '%s' at commit %s\n"), + cs_data->path, + find_unique_abbrev(cs_data->super_oid, DEFAULT_ABBREV)); + + fetch_task_release(task); + free(task); + continue; + } + + if (!spf->quiet) + strbuf_addf(err, + _("Fetching submodule %s%s at commit %s\n"), + spf->prefix, task->sub->path, + 
find_unique_abbrev(cs_data->super_oid, + DEFAULT_ABBREV)); + + spf->changed_count++; + /* + * NEEDSWORK: Submodules set/unset a value for + * core.worktree when they are populated/unpopulated by + * "git checkout" (and similar commands, see + * submodule_move_head() and + * connect_work_tree_and_git_dir()), but if the + * submodule is unpopulated in another way (e.g. "git + * rm", "rm -r"), core.worktree will still be set even + * though the directory doesn't exist, and the child + * process will crash while trying to chdir into the + * nonexistent directory. + * + * In this case, we know that the submodule has no + * working tree, so we can work around this by + * setting "--work-tree=." (--bare does not work because + * worktree settings take precedence over bare-ness). + * However, this is not necessarily true in other cases, + * so a generalized solution is still necessary. + * + * Possible solutions: + * - teach "git [add|rm]" to unset core.worktree and + * discourage users from removing submodules without + * using a Git command. + * - teach submodule child processes to ignore stale + * core.worktree values. + */ + strvec_push(&task->git_args, "--work-tree=."); + return task; + } + return NULL; +} + +static int get_next_submodule(struct child_process *cp, struct strbuf *err, + void *data, void **task_cb) +{ + struct submodule_parallel_fetch *spf = data; + struct fetch_task *task = + get_fetch_task_from_index(spf, err); + if (!task) + task = get_fetch_task_from_changed(spf, err); + + if (task) { + struct strbuf submodule_prefix = STRBUF_INIT; + + child_process_init(cp); + cp->dir = task->repo->gitdir; + prepare_submodule_repo_env_in_gitdir(&cp->env_array); + cp->git_cmd = 1; + strvec_init(&cp->args); + if (task->git_args.nr) + strvec_pushv(&cp->args, task->git_args.v); + strvec_pushv(&cp->args, spf->args.v); + strvec_push(&cp->args, task->default_argv); + strvec_push(&cp->args, "--submodule-prefix"); + + strbuf_addf(&submodule_prefix, "%s%s/", + spf->prefix, + task->sub->path); + strvec_push(&cp->args, submodule_prefix.buf); + *task_cb = task; + + strbuf_release(&submodule_prefix); + string_list_insert(&spf->seen_submodule_names, task->sub->name); + return 1; + } if (spf->oid_fetch_tasks_nr) { struct fetch_task *task = @@ -1597,7 +1759,7 @@ static int fetch_finish(int retvalue, struct strbuf *err, struct fetch_task *task = task_cb; struct string_list_item *it; - struct oid_array *commits; + struct changed_submodule_data *cs_data; if (!task || !task->sub) BUG("callback cookie bogus"); @@ -1625,14 +1787,14 @@ static int fetch_finish(int retvalue, struct strbuf *err, /* Could be an unchanged submodule, not contained in the list */ goto out; - commits = it->util; - oid_array_filter(commits, + cs_data = it->util; + oid_array_filter(&cs_data->new_commits, commit_missing_in_sub, task->repo); /* Are there commits we want, but do not exist? 
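With get_fetch_task_from_changed() and the seen_submodule_names list above, an on-demand fetch can now recurse into a submodule that is known only from newly fetched superproject commits rather than from the index; roughly (remote and submodule names are hypothetical):

    # Fetch in the superproject; a submodule that gained commits in the
    # fetched history is fetched too, even if it has no checkout here,
    # using the superproject commit that references it:
    git -C super fetch --recurse-submodules=on-demand origin
    #   Fetching submodule sub at commit 1234abcd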
*/ - if (commits->nr) { - task->commits = commits; + if (cs_data->new_commits.nr) { + task->commits = &cs_data->new_commits; ALLOC_GROW(spf->oid_fetch_tasks, spf->oid_fetch_tasks_nr + 1, spf->oid_fetch_tasks_alloc); @@ -1647,11 +1809,11 @@ out: return 0; } -int fetch_populated_submodules(struct repository *r, - const struct strvec *options, - const char *prefix, int command_line_option, - int default_option, - int quiet, int max_parallel_jobs) +int fetch_submodules(struct repository *r, + const struct strvec *options, + const char *prefix, int command_line_option, + int default_option, + int quiet, int max_parallel_jobs) { int i; struct submodule_parallel_fetch spf = SPF_INIT; @@ -1690,7 +1852,7 @@ int fetch_populated_submodules(struct repository *r, strvec_clear(&spf.args); out: - free_submodules_oids(&spf.changed_submodule_names); + free_submodules_data(&spf.changed_submodule_names); return spf.result; } diff --git a/submodule.h b/submodule.h index 784ceffc0e..40c1445237 100644 --- a/submodule.h +++ b/submodule.h @@ -88,12 +88,12 @@ int should_update_submodules(void); */ const struct submodule *submodule_from_ce(const struct cache_entry *ce); void check_for_new_submodule_commits(struct object_id *oid); -int fetch_populated_submodules(struct repository *r, - const struct strvec *options, - const char *prefix, - int command_line_option, - int default_option, - int quiet, int max_parallel_jobs); +int fetch_submodules(struct repository *r, + const struct strvec *options, + const char *prefix, + int command_line_option, + int default_option, + int quiet, int max_parallel_jobs); unsigned is_submodule_modified(const char *path, int ignore_untracked); int submodule_uses_gitfile(const char *path); @@ -103,12 +103,11 @@ int submodule_uses_gitfile(const char *path); int bad_to_remove_submodule(const char *path, unsigned flags); /* - * Call add_submodule_odb() to add the submodule at the given path to a list. - * When register_all_submodule_odb_as_alternates() is called, the object stores - * of all submodules in that list will be added as alternates in - * the_repository. + * Call add_submodule_odb_by_path() to add the submodule at the given + * path to a list. When register_all_submodule_odb_as_alternates() is + * called, the object stores of all submodules in that list will be + * added as alternates in the_repository. 
*/ -int add_submodule_odb(const char *path); void add_submodule_odb_by_path(const char *path); int register_all_submodule_odb_as_alternates(void); diff --git a/t/Makefile b/t/Makefile index 46cd5fc527..056ce55dcc 100644 --- a/t/Makefile +++ b/t/Makefile @@ -1,3 +1,6 @@ +# Import tree-wide shared Makefile behavior and libraries +include ../shared.mak + # Run tests # # Copyright (c) 2005 Junio C Hamano diff --git a/t/helper/test-read-graph.c b/t/helper/test-read-graph.c index 75927b2c81..98b73bb8f2 100644 --- a/t/helper/test-read-graph.c +++ b/t/helper/test-read-graph.c @@ -3,6 +3,7 @@ #include "commit-graph.h" #include "repository.h" #include "object-store.h" +#include "bloom.h" int cmd__read_graph(int argc, const char **argv) { @@ -45,6 +46,18 @@ int cmd__read_graph(int argc, const char **argv) printf(" bloom_data"); printf("\n"); + printf("options:"); + if (graph->bloom_filter_settings) + printf(" bloom(%"PRIu32",%"PRIu32",%"PRIu32")", + graph->bloom_filter_settings->hash_version, + graph->bloom_filter_settings->bits_per_entry, + graph->bloom_filter_settings->num_hashes); + if (graph->read_generation_data) + printf(" read_generation_data"); + if (graph->topo_levels) + printf(" topo_levels"); + printf("\n"); + UNLEAK(graph); return 0; diff --git a/t/helper/test-run-command.c b/t/helper/test-run-command.c index 8f370cd89f..f3b90aa834 100644 --- a/t/helper/test-run-command.c +++ b/t/helper/test-run-command.c @@ -19,7 +19,6 @@ #include "thread-utils.h" #include "wildmatch.h" #include "gettext.h" -#include "parse-options.h" static int number_callbacks; static int parallel_next(struct child_process *cp, @@ -180,15 +179,16 @@ static int testsuite(int argc, const char **argv) if (max_jobs > suite.tests.nr) max_jobs = suite.tests.nr; - fprintf(stderr, "Running %d tests (%d at a time)\n", - suite.tests.nr, max_jobs); + fprintf(stderr, "Running %"PRIuMAX" tests (%d at a time)\n", + (uintmax_t)suite.tests.nr, max_jobs); ret = run_processes_parallel(max_jobs, next_test, test_failed, test_finished, &suite); if (suite.failed.nr > 0) { ret = 1; - fprintf(stderr, "%d tests failed:\n\n", suite.failed.nr); + fprintf(stderr, "%"PRIuMAX" tests failed:\n\n", + (uintmax_t)suite.failed.nr); for (i = 0; i < suite.failed.nr; i++) fprintf(stderr, "\t%s\n", suite.failed.items[i].string); } diff --git a/t/interop/Makefile b/t/interop/Makefile index 31a4bbc716..6911c2915a 100644 --- a/t/interop/Makefile +++ b/t/interop/Makefile @@ -1,3 +1,6 @@ +# Import tree-wide shared Makefile behavior and libraries +include ../../shared.mak + -include ../../config.mak export GIT_TEST_OPTIONS diff --git a/t/lib-commit-graph.sh b/t/lib-commit-graph.sh new file mode 100755 index 0000000000..5d79e1a4e9 --- /dev/null +++ b/t/lib-commit-graph.sh @@ -0,0 +1,58 @@ +#!/bin/sh + +# Helper functions for testing commit-graphs. 
+ +# Initialize OID cache with oid_version +test_oid_cache <<-EOF +oid_version sha1:1 +oid_version sha256:2 +EOF + +graph_git_two_modes() { + git -c core.commitGraph=true $1 >output && + git -c core.commitGraph=false $1 >expect && + test_cmp expect output +} + +graph_git_behavior() { + MSG=$1 + DIR=$2 + BRANCH=$3 + COMPARE=$4 + test_expect_success "check normal git operations: $MSG" ' + cd "$TRASH_DIRECTORY/$DIR" && + graph_git_two_modes "log --oneline $BRANCH" && + graph_git_two_modes "log --topo-order $BRANCH" && + graph_git_two_modes "log --graph $COMPARE..$BRANCH" && + graph_git_two_modes "branch -vv" && + graph_git_two_modes "merge-base -a $BRANCH $COMPARE" + ' +} + +graph_read_expect() { + OPTIONAL="" + NUM_CHUNKS=3 + if test -n "$2" + then + OPTIONAL=" $2" + NUM_CHUNKS=$((3 + $(echo "$2" | wc -w))) + fi + GENERATION_VERSION=2 + if test -n "$3" + then + GENERATION_VERSION=$3 + fi + OPTIONS= + if test $GENERATION_VERSION -gt 1 + then + OPTIONS=" read_generation_data" + fi + cat >expect <<- EOF + header: 43475048 1 $(test_oid oid_version) $NUM_CHUNKS 0 + num_commits: $1 + chunks: oid_fanout oid_lookup commit_metadata$OPTIONAL + options:$OPTIONS + EOF + test-tool read-graph >output && + test_cmp expect output +} diff --git a/t/lib-gpg.sh b/t/lib-gpg.sh index 3e7ee1386a..114785586a 100644 --- a/t/lib-gpg.sh +++ b/t/lib-gpg.sh @@ -40,7 +40,7 @@ test_lazy_prereq GPG ' # > lib-gpg/ownertrust mkdir "$GNUPGHOME" && chmod 0700 "$GNUPGHOME" && - (gpgconf --kill gpg-agent || : ) && + (gpgconf --kill all || : ) && gpg --homedir "${GNUPGHOME}" --import \ "$TEST_DIRECTORY"/lib-gpg/keyring.gpg && gpg --homedir "${GNUPGHOME}" --import-ownertrust \ @@ -72,12 +72,11 @@ test_lazy_prereq GPGSM ' --passphrase-fd 0 --pinentry-mode loopback \ --import "$TEST_DIRECTORY"/lib-gpg/gpgsm_cert.p12 && - gpgsm --homedir "${GNUPGHOME}" -K | - grep fingerprint: | - cut -d" " -f4 | - tr -d "\\n" >"${GNUPGHOME}/trustlist.txt" && + gpgsm --homedir "${GNUPGHOME}" -K --with-colons | + awk -F ":" "/^fpr:/ {printf \"%s S relax\\n\", \$10}" \ + >"${GNUPGHOME}/trustlist.txt" && + (gpgconf --reload all || : ) && - echo " S relax" >>"${GNUPGHOME}/trustlist.txt" && echo hello | gpgsm --homedir "${GNUPGHOME}" >/dev/null \ -u committer@example.com -o /dev/null --sign - ' diff --git a/t/perf/Makefile b/t/perf/Makefile index 2465770a78..e4808aebed 100644 --- a/t/perf/Makefile +++ b/t/perf/Makefile @@ -1,3 +1,6 @@ +# Import tree-wide shared Makefile behavior and libraries +include ../../shared.mak + -include ../../config.mak export GIT_TEST_OPTIONS diff --git a/t/perf/p1006-cat-file.sh b/t/perf/p1006-cat-file.sh new file mode 100755 index 0000000000..dcd8015379 --- /dev/null +++ b/t/perf/p1006-cat-file.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +test_description='Tests listing object info performance' +. 
./perf-lib.sh + +test_perf_large_repo + +test_perf 'cat-file --batch-check' ' + git cat-file --batch-all-objects --batch-check +' + +test_done diff --git a/t/perf/p2000-sparse-operations.sh b/t/perf/p2000-sparse-operations.sh index 2a7106b949..382716cfca 100755 --- a/t/perf/p2000-sparse-operations.sh +++ b/t/perf/p2000-sparse-operations.sh @@ -117,6 +117,7 @@ test_perf_on_all git diff test_perf_on_all git diff --cached test_perf_on_all git blame $SPARSE_CONE/a test_perf_on_all git blame $SPARSE_CONE/f3/a +test_perf_on_all git read-tree -mu HEAD test_perf_on_all git checkout-index -f --all test_perf_on_all git update-index --add --remove $SPARSE_CONE/a diff --git a/t/t0000-basic.sh b/t/t0000-basic.sh index b007f0efef..9dcbf518a7 100755 --- a/t/t0000-basic.sh +++ b/t/t0000-basic.sh @@ -1089,7 +1089,8 @@ test_expect_success 'update-index D/F conflict' ' mv path2 path0 && mv tmp path2 && git update-index --add --replace path2 path0/file2 && - numpath0=$(git ls-files path0 | wc -l) && + git ls-files path0 >tmp && + numpath0=$(wc -l <tmp) && test $numpath0 = 1 ' @@ -1103,13 +1104,14 @@ test_expect_success 'very long name in the index handled sanely' ' >path4 && git update-index --add path4 && + git ls-files -s path4 >tmp && ( - git ls-files -s path4 | - sed -e "s/ .*/ /" | + sed -e "s/ .*/ /" tmp | tr -d "\012" && echo "$a" ) | git update-index --index-info && - len=$(git ls-files "a*" | wc -c) && + git ls-files "a*" >tmp && + len=$(wc -c <tmp) && test $len = 4098 ' diff --git a/t/t0002-gitfile.sh b/t/t0002-gitfile.sh index 76052cb562..f6356db183 100755 --- a/t/t0002-gitfile.sh +++ b/t/t0002-gitfile.sh @@ -65,9 +65,11 @@ test_expect_success 'check commit-tree' ' test_path_is_file "$REAL/objects/$(objpath $SHA)" ' -test_expect_success 'check rev-list' ' +test_expect_success !SANITIZE_LEAK 'check rev-list' ' git update-ref "HEAD" "$SHA" && - test "$SHA" = "$(git rev-list HEAD)" + git rev-list HEAD >actual && + echo $SHA >expected && + test_cmp expected actual ' test_expect_success 'setup_git_dir twice in subdir' ' diff --git a/t/t0003-attributes.sh b/t/t0003-attributes.sh index b9ed612af1..143f100517 100755 --- a/t/t0003-attributes.sh +++ b/t/t0003-attributes.sh @@ -205,15 +205,18 @@ test_expect_success 'attribute test: read paths from stdin' ' test_expect_success 'attribute test: --all option' ' grep -v unspecified <expect-all | sort >specified-all && sed -e "s/:.*//" <expect-all | uniq >stdin-all && - git check-attr --stdin --all <stdin-all | sort >actual && + git check-attr --stdin --all <stdin-all >tmp && + sort tmp >actual && test_cmp specified-all actual ' test_expect_success 'attribute test: --cached option' ' - git check-attr --cached --stdin --all <stdin-all | sort >actual && + git check-attr --cached --stdin --all <stdin-all >tmp && + sort tmp >actual && test_must_be_empty actual && git add .gitattributes a/.gitattributes a/b/.gitattributes && - git check-attr --cached --stdin --all <stdin-all | sort >actual && + git check-attr --cached --stdin --all <stdin-all >tmp && + sort tmp >actual && test_cmp specified-all actual ' diff --git a/t/t0012-help.sh b/t/t0012-help.sh index cbd725ccac..6c3e1f7159 100755 --- a/t/t0012-help.sh +++ b/t/t0012-help.sh @@ -35,6 +35,9 @@ test_expect_success 'basic help commands' ' ' test_expect_success 'invalid usage' ' + test_expect_code 129 git help -a add && + test_expect_code 129 git help --all add && + test_expect_code 129 git help -g add && test_expect_code 129 git help -a -c && @@ -46,6 +49,29 @@ test_expect_success 'invalid usage' ' test_expect_code 129 
git help --config-sections-for-completion add ' +for opt in '-a' '-g' '-c' '--config-for-completion' '--config-sections-for-completion' +do + test_expect_success "invalid usage of '$opt' with [-i|-m|-w]" ' + git help $opt && + test_expect_code 129 git help $opt -i && + test_expect_code 129 git help $opt -m && + test_expect_code 129 git help $opt -w + ' + + if test "$opt" = "-a" + then + continue + fi + + test_expect_success "invalid usage of '$opt' with --no-external-commands" ' + test_expect_code 129 git help $opt --no-external-commands + ' + + test_expect_success "invalid usage of '$opt' with --no-aliases" ' + test_expect_code 129 git help $opt --no-external-commands + ' +done + test_expect_success "works for commands and guides by default" ' configure_help && git help status && @@ -138,6 +164,74 @@ test_expect_success 'git help --config-sections-for-completion' ' test_cmp human.munged sections ' +test_section_spacing () { + cat >expect && + "$@" >out && + grep -E "(^[^ ]|^$)" out >actual +} + +test_section_spacing_trailer () { + test_section_spacing "$@" && + test_expect_code 1 git >out && + sed -n '/list available subcommands/,$p' <out >>expect +} + + +for cmd in git "git help" +do + test_expect_success "'$cmd' section spacing" ' + test_section_spacing_trailer git help <<-\EOF && + usage: git [--version] [--help] [-C <path>] [-c <name>=<value>] + + These are common Git commands used in various situations: + + start a working area (see also: git help tutorial) + + work on the current change (see also: git help everyday) + + examine the history and state (see also: git help revisions) + + grow, mark and tweak your common history + + collaborate (see also: git help workflows) + + EOF + test_cmp expect actual + ' +done + +test_expect_success "'git help -a' section spacing" ' + test_section_spacing \ + git help -a --no-external-commands --no-aliases <<-\EOF && + See '\''git help <command>'\'' to read about a specific subcommand + + Main Porcelain Commands + + Ancillary Commands / Manipulators + + Ancillary Commands / Interrogators + + Interacting with Others + + Low-level Commands / Manipulators + + Low-level Commands / Interrogators + + Low-level Commands / Syncing Repositories + + Low-level Commands / Internal Helpers + EOF + test_cmp expect actual +' + +test_expect_success "'git help -g' section spacing" ' + test_section_spacing_trailer git help -g <<-\EOF && + The Git concept guides are: + + EOF + test_cmp expect actual +' + test_expect_success 'generate builtin list' ' mkdir -p sub && git --list-cmds=builtins >builtins diff --git a/t/t0022-crlf-rename.sh b/t/t0022-crlf-rename.sh index c1a331e9e9..9fe9891251 100755 --- a/t/t0022-crlf-rename.sh +++ b/t/t0022-crlf-rename.sh @@ -24,8 +24,8 @@ test_expect_success setup ' test_expect_success 'diff -M' ' - git diff-tree -M -r --name-status HEAD^ HEAD | - sed -e "s/R[0-9]*/RNUM/" >actual && + git diff-tree -M -r --name-status HEAD^ HEAD >tmp && + sed -e "s/R[0-9]*/RNUM/" tmp >actual && echo "RNUM sample elpmas" >expect && test_cmp expect actual diff --git a/t/t0025-crlf-renormalize.sh b/t/t0025-crlf-renormalize.sh index 81447978b7..f7202c192e 100755 --- a/t/t0025-crlf-renormalize.sh +++ b/t/t0025-crlf-renormalize.sh @@ -22,8 +22,8 @@ test_expect_success 'renormalize CRLF in repo' ' i/lf w/lf attr/text=auto LF.txt i/lf w/mixed attr/text=auto CRLF_mix_LF.txt EOF - git ls-files --eol | - sed -e "s/ / /g" -e "s/ */ /g" | + git ls-files --eol >tmp && + sed -e "s/ / /g" -e "s/ */ /g" tmp | sort >actual && test_cmp expect actual ' diff --git 
a/t/t0027-auto-crlf.sh b/t/t0027-auto-crlf.sh index c5f7ac63b0..0feb41a23f 100755 --- a/t/t0027-auto-crlf.sh +++ b/t/t0027-auto-crlf.sh @@ -311,8 +311,8 @@ checkout_files () { i/-text w/$(stats_ascii $crlfnul) attr/$(attr_ascii $attr $aeol) crlf_false_attr__CRLF_nul.txt i/-text w/$(stats_ascii $crlfnul) attr/$(attr_ascii $attr $aeol) crlf_false_attr__LF_nul.txt EOF - git ls-files --eol crlf_false_attr__* | - sed -e "s/ / /g" -e "s/ */ /g" | + git ls-files --eol crlf_false_attr__* >tmp && + sed -e "s/ / /g" -e "s/ */ /g" tmp | sort >actual && test_cmp expect actual ' @@ -359,12 +359,12 @@ test_expect_success 'ls-files --eol -o Text/Binary' ' i/ w/crlf TeBi_126_CL i/ w/-text TeBi_126_CLC EOF - git ls-files --eol -o | + git ls-files --eol -o >tmp && sed -n -e "/TeBi_/{s!attr/[ ]*!!g s! ! !g s! *! !g p - }" | sort >actual && + }" tmp | sort >actual && test_cmp expect actual ' @@ -617,8 +617,8 @@ test_expect_success 'ls-files --eol -d -z' ' i/lf w/ crlf_false_attr__LF.txt i/mixed w/ crlf_false_attr__CRLF_mix_LF.txt EOF - git ls-files --eol -d | - sed -e "s!attr/[^ ]*!!g" -e "s/ / /g" -e "s/ */ /g" | + git ls-files --eol -d >tmp && + sed -e "s!attr/[^ ]*!!g" -e "s/ / /g" -e "s/ */ /g" tmp | sort >actual && test_cmp expect actual ' diff --git a/t/t0030-stripspace.sh b/t/t0030-stripspace.sh index ae1ca380c1..0a5713c524 100755 --- a/t/t0030-stripspace.sh +++ b/t/t0030-stripspace.sh @@ -13,6 +13,10 @@ s40=' ' sss="$s40$s40$s40$s40$s40$s40$s40$s40$s40$s40" # 400 ttt="$t40$t40$t40$t40$t40$t40$t40$t40$t40$t40" # 400 +printf_git_stripspace () { + printf "$1" | git stripspace +} + test_expect_success \ 'long lines without spaces should be unchanged' ' echo "$ttt" >expect && @@ -225,32 +229,38 @@ test_expect_success \ test_expect_success \ 'text without newline at end should end with newline' ' - test $(printf "$ttt" | git stripspace | wc -l) -gt 0 && - test $(printf "$ttt$ttt" | git stripspace | wc -l) -gt 0 && - test $(printf "$ttt$ttt$ttt" | git stripspace | wc -l) -gt 0 && - test $(printf "$ttt$ttt$ttt$ttt" | git stripspace | wc -l) -gt 0 + test_stdout_line_count -gt 0 printf_git_stripspace "$ttt" && + test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$ttt" && + test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$ttt$ttt" && + test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$ttt$ttt$ttt" ' # text plus spaces at the end: test_expect_success \ 'text plus spaces without newline at end should end with newline' ' - test $(printf "$ttt$sss" | git stripspace | wc -l) -gt 0 && - test $(printf "$ttt$ttt$sss" | git stripspace | wc -l) -gt 0 && - test $(printf "$ttt$ttt$ttt$sss" | git stripspace | wc -l) -gt 0 && - test $(printf "$ttt$sss$sss" | git stripspace | wc -l) -gt 0 && - test $(printf "$ttt$ttt$sss$sss" | git stripspace | wc -l) -gt 0 && - test $(printf "$ttt$sss$sss$sss" | git stripspace | wc -l) -gt 0 + test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$sss" && + test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$ttt$sss" && + test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$ttt$ttt$sss" && + test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$sss$sss" && + test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$ttt$sss$sss" && + test_stdout_line_count -gt 0 printf_git_stripspace "$ttt$sss$sss$sss" ' test_expect_success \ 'text plus spaces without newline at end should not show spaces' ' - ! (printf "$ttt$sss" | git stripspace | grep " " >/dev/null) && - ! (printf "$ttt$ttt$sss" | git stripspace | grep " " >/dev/null) && - ! 
(printf "$ttt$ttt$ttt$sss" | git stripspace | grep " " >/dev/null) && - ! (printf "$ttt$sss$sss" | git stripspace | grep " " >/dev/null) && - ! (printf "$ttt$ttt$sss$sss" | git stripspace | grep " " >/dev/null) && - ! (printf "$ttt$sss$sss$sss" | git stripspace | grep " " >/dev/null) + printf "$ttt$sss" | git stripspace >tmp && + ! grep " " tmp >/dev/null && + printf "$ttt$ttt$sss" | git stripspace >tmp && + ! grep " " tmp >/dev/null && + printf "$ttt$ttt$ttt$sss" | git stripspace >tmp && + ! grep " " tmp >/dev/null && + printf "$ttt$sss$sss" | git stripspace >tmp && + ! grep " " tmp >/dev/null && + printf "$ttt$ttt$sss$sss" | git stripspace >tmp && + ! grep " " tmp >/dev/null && + printf "$ttt$sss$sss$sss" | git stripspace >tmp && + ! grep " " tmp >/dev/null ' test_expect_success \ @@ -282,12 +292,18 @@ test_expect_success \ test_expect_success \ 'text plus spaces at end should not show spaces' ' - ! (echo "$ttt$sss" | git stripspace | grep " " >/dev/null) && - ! (echo "$ttt$ttt$sss" | git stripspace | grep " " >/dev/null) && - ! (echo "$ttt$ttt$ttt$sss" | git stripspace | grep " " >/dev/null) && - ! (echo "$ttt$sss$sss" | git stripspace | grep " " >/dev/null) && - ! (echo "$ttt$ttt$sss$sss" | git stripspace | grep " " >/dev/null) && - ! (echo "$ttt$sss$sss$sss" | git stripspace | grep " " >/dev/null) + echo "$ttt$sss" | git stripspace >tmp && + ! grep " " tmp >/dev/null && + echo "$ttt$ttt$sss" | git stripspace >tmp && + ! grep " " tmp >/dev/null && + echo "$ttt$ttt$ttt$sss" | git stripspace >tmp && + ! grep " " tmp >/dev/null && + echo "$ttt$sss$sss" | git stripspace >tmp && + ! grep " " tmp >/dev/null && + echo "$ttt$ttt$sss$sss" | git stripspace >tmp && + ! grep " " tmp >/dev/null && + echo "$ttt$sss$sss$sss" | git stripspace >tmp && + ! grep " " tmp >/dev/null ' test_expect_success \ @@ -339,11 +355,16 @@ test_expect_success \ test_expect_success \ 'spaces without newline at end should not show spaces' ' - ! (printf "" | git stripspace | grep " " >/dev/null) && - ! (printf "$sss" | git stripspace | grep " " >/dev/null) && - ! (printf "$sss$sss" | git stripspace | grep " " >/dev/null) && - ! (printf "$sss$sss$sss" | git stripspace | grep " " >/dev/null) && - ! (printf "$sss$sss$sss$sss" | git stripspace | grep " " >/dev/null) + printf "" | git stripspace >tmp && + ! grep " " tmp >/dev/null && + printf "$sss" | git stripspace >tmp && + ! grep " " tmp >/dev/null && + printf "$sss$sss" | git stripspace >tmp && + ! grep " " tmp >/dev/null && + printf "$sss$sss$sss" | git stripspace >tmp && + ! grep " " tmp >/dev/null && + printf "$sss$sss$sss$sss" | git stripspace >tmp && + ! 
grep " " tmp >/dev/null ' test_expect_success \ diff --git a/t/t0050-filesystem.sh b/t/t0050-filesystem.sh index afc343cf9b..5c9dc90d0b 100755 --- a/t/t0050-filesystem.sh +++ b/t/t0050-filesystem.sh @@ -104,7 +104,8 @@ test_expect_failure CASE_INSENSITIVE_FS 'add (with different case)' ' rm camelcase && echo 1 >CamelCase && git add CamelCase && - camel=$(git ls-files | grep -i camelcase) && + git ls-files >tmp && + camel=$(grep -i camelcase tmp) && test $(echo "$camel" | wc -l) = 1 && test "z$(git cat-file blob :$camel)" = z1 ' diff --git a/t/t0410-partial-clone.sh b/t/t0410-partial-clone.sh index f17abd298c..1e864cf317 100755 --- a/t/t0410-partial-clone.sh +++ b/t/t0410-partial-clone.sh @@ -618,6 +618,25 @@ test_expect_success 'do not fetch when checking existence of tree we construct o git -C repo cherry-pick side1 ' +test_expect_success 'exact rename does not need to fetch the blob lazily' ' + rm -rf repo partial.git && + test_create_repo repo && + content="some dummy content" && + test_commit -C repo create-a-file file.txt "$content" && + git -C repo mv file.txt new-file.txt && + git -C repo commit -m rename-the-file && + FILE_HASH=$(git -C repo rev-parse HEAD:new-file.txt) && + test_config -C repo uploadpack.allowfilter 1 && + test_config -C repo uploadpack.allowanysha1inwant 1 && + + git clone --filter=blob:none --bare "file://$(pwd)/repo" partial.git && + git -C partial.git rev-list --objects --missing=print HEAD >out && + grep "[?]$FILE_HASH" out && + git -C partial.git log --follow -- new-file.txt && + git -C partial.git rev-list --objects --missing=print HEAD >out && + grep "[?]$FILE_HASH" out +' + test_expect_success 'lazy-fetch when accessing object not in the_repository' ' rm -rf full partial.git && test_create_repo full && diff --git a/t/t1001-read-tree-m-2way.sh b/t/t1001-read-tree-m-2way.sh index d1115528cb..0710b1fb1e 100755 --- a/t/t1001-read-tree-m-2way.sh +++ b/t/t1001-read-tree-m-2way.sh @@ -21,7 +21,6 @@ In the test, these paths are used: yomin - not in H or M ' -TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh . "$TEST_DIRECTORY"/lib-read-tree.sh @@ -38,11 +37,12 @@ compare_change () { } check_cache_at () { - clean_if_empty=$(git diff-files -- "$1") + git diff-files -- "$1" >out && + clean_if_empty=$(cat out) && case "$clean_if_empty" in '') echo "$1: clean" ;; ?*) echo "$1: dirty" ;; - esac + esac && case "$2,$clean_if_empty" in clean,) : ;; clean,?*) false ;; diff --git a/t/t1002-read-tree-m-u-2way.sh b/t/t1002-read-tree-m-u-2way.sh index ca5c5510c7..46cbd5514a 100755 --- a/t/t1002-read-tree-m-u-2way.sh +++ b/t/t1002-read-tree-m-u-2way.sh @@ -9,7 +9,6 @@ This is identical to t1001, but uses -u to update the work tree as well. ' -TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh . "$TEST_DIRECTORY"/lib-read-tree.sh @@ -23,11 +22,12 @@ compare_change () { } check_cache_at () { - clean_if_empty=$(git diff-files -- "$1") + git diff-files -- "$1" >out && + clean_if_empty=$(cat out) && case "$clean_if_empty" in '') echo "$1: clean" ;; ?*) echo "$1: dirty" ;; - esac + esac && case "$2,$clean_if_empty" in clean,) : ;; clean,?*) false ;; diff --git a/t/t1003-read-tree-prefix.sh b/t/t1003-read-tree-prefix.sh index e0db2066f3..c860c08ecb 100755 --- a/t/t1003-read-tree-prefix.sh +++ b/t/t1003-read-tree-prefix.sh @@ -25,4 +25,14 @@ test_expect_success 'read-tree --prefix' ' cmp expect actual ' +test_expect_success 'read-tree --prefix with leading slash exits with error' ' + git rm -rf . 
&& + test_must_fail git read-tree --prefix=/two/ $tree && + git read-tree --prefix=two/ $tree && + + git rm -rf . && + test_must_fail git read-tree --prefix=/ $tree && + git read-tree --prefix= $tree +' + test_done diff --git a/t/t1006-cat-file.sh b/t/t1006-cat-file.sh index 145eee11df..1b85207694 100755 --- a/t/t1006-cat-file.sh +++ b/t/t1006-cat-file.sh @@ -105,13 +105,18 @@ strlen () { } maybe_remove_timestamp () { - if test -z "$2"; then - echo_without_newline "$1" - else - echo_without_newline "$(printf '%s\n' "$1" | sed -e 's/ [0-9][0-9]* [-+][0-9][0-9][0-9][0-9]$//')" - fi + if test -z "$2"; then + echo_without_newline "$1" + else + echo_without_newline "$(printf '%s\n' "$1" | remove_timestamp)" + fi } +remove_timestamp () { + sed -e 's/ [0-9][0-9]* [-+][0-9][0-9][0-9][0-9]$//' +} + + run_tests () { type=$1 sha1=$2 @@ -177,12 +182,36 @@ $content" test_cmp expect actual ' + for opt in --buffer --no-buffer + do + test -z "$content" || + test_expect_success "--batch-command $opt output of $type content is correct" ' + maybe_remove_timestamp "$batch_output" $no_ts >expect && + maybe_remove_timestamp "$(test_write_lines "contents $sha1" | + git cat-file --batch-command $opt)" $no_ts >actual && + test_cmp expect actual + ' + + test_expect_success "--batch-command $opt output of $type info is correct" ' + echo "$sha1 $type $size" >expect && + test_write_lines "info $sha1" | + git cat-file --batch-command $opt >actual && + test_cmp expect actual + ' + done + test_expect_success "custom --batch-check format" ' echo "$type $sha1" >expect && echo $sha1 | git cat-file --batch-check="%(objecttype) %(objectname)" >actual && test_cmp expect actual ' + test_expect_success "custom --batch-command format" ' + echo "$type $sha1" >expect && + echo "info $sha1" | git cat-file --batch-command="%(objecttype) %(objectname)" >actual && + test_cmp expect actual + ' + test_expect_success '--batch-check with %(rest)' ' echo "$type this is some extra content" >expect && echo "$sha1 this is some extra content" | @@ -224,6 +253,22 @@ test_expect_success "setup" ' run_tests 'blob' $hello_sha1 $hello_size "$hello_content" "$hello_content" +test_expect_success '--batch-command --buffer with flush for blob info' ' + echo "$hello_sha1 blob $hello_size" >expect && + test_write_lines "info $hello_sha1" "flush" | + GIT_TEST_CAT_FILE_NO_FLUSH_ON_EXIT=1 \ + git cat-file --batch-command --buffer >actual && + test_cmp expect actual +' + +test_expect_success '--batch-command --buffer without flush for blob info' ' + touch output && + test_write_lines "info $hello_sha1" | + GIT_TEST_CAT_FILE_NO_FLUSH_ON_EXIT=1 \ + git cat-file --batch-command --buffer >>output && + test_must_be_empty output +' + test_expect_success '--batch-check without %(rest) considers whole line' ' echo "$hello_sha1 blob $hello_size" >expect && git update-index --add --cacheinfo 100644 $hello_sha1 "white space" && @@ -267,7 +312,7 @@ test_expect_success \ "Reach a blob from a tag pointing to it" \ "test '$hello_content' = \"\$(git cat-file blob $tag_sha1)\"" -for batch in batch batch-check +for batch in batch batch-check batch-command do for opt in t s e p do @@ -373,6 +418,49 @@ test_expect_success "--batch-check with multiple sha1s gives correct format" ' "$(echo_without_newline "$batch_check_input" | git cat-file --batch-check)" ' +test_expect_success '--batch-command with multiple info calls gives correct format' ' + cat >expect <<-EOF && + $hello_sha1 blob $hello_size + $tree_sha1 tree $tree_size + $commit_sha1 commit $commit_size + $tag_sha1 tag 
$tag_size + deadbeef missing + EOF + + git cat-file --batch-command --buffer >actual <<-EOF && + info $hello_sha1 + info $tree_sha1 + info $commit_sha1 + info $tag_sha1 + info deadbeef + EOF + + test_cmp expect actual +' + +test_expect_success '--batch-command with multiple command calls gives correct format' ' + remove_timestamp >expect <<-EOF && + $hello_sha1 blob $hello_size + $hello_content + $commit_sha1 commit $commit_size + $commit_content + $tag_sha1 tag $tag_size + $tag_content + deadbeef missing + EOF + + git cat-file --batch-command --buffer >actual_raw <<-EOF && + contents $hello_sha1 + contents $commit_sha1 + contents $tag_sha1 + contents deadbeef + flush + EOF + + remove_timestamp <actual_raw >actual && + test_cmp expect actual +' + test_expect_success 'setup blobs which are likely to delta' ' test-tool genrandom foo 10240 >foo && { cat foo && echo plus; } >foo-plus && @@ -963,5 +1051,40 @@ test_expect_success 'cat-file --batch-all-objects --batch-check ignores replace' echo "$orig commit $orig_size" >expect && test_cmp expect actual ' +test_expect_success 'batch-command empty command' ' + echo "" >cmd && + test_expect_code 128 git cat-file --batch-command <cmd 2>err && + grep "^fatal:.*empty command in input.*" err +' + +test_expect_success 'batch-command whitespace before command' ' + echo " info deadbeef" >cmd && + test_expect_code 128 git cat-file --batch-command <cmd 2>err && + grep "^fatal:.*whitespace before command.*" err +' + +test_expect_success 'batch-command unknown command' ' + echo unknown_command >cmd && + test_expect_code 128 git cat-file --batch-command <cmd 2>err && + grep "^fatal:.*unknown command.*" err +' + +test_expect_success 'batch-command missing arguments' ' + echo "info" >cmd && + test_expect_code 128 git cat-file --batch-command <cmd 2>err && + grep "^fatal:.*info requires arguments.*" err +' + +test_expect_success 'batch-command flush with arguments' ' + echo "flush arg" >cmd && + test_expect_code 128 git cat-file --batch-command --buffer <cmd 2>err && + grep "^fatal:.*flush takes no arguments.*" err +' + +test_expect_success 'batch-command flush without --buffer' ' + echo "flush" >cmd && + test_expect_code 128 git cat-file --batch-command <cmd 2>err && + grep "^fatal:.*flush is only for --buffer mode.*" err +' test_done diff --git a/t/t1011-read-tree-sparse-checkout.sh b/t/t1011-read-tree-sparse-checkout.sh index 24092c09a9..dd957be1b7 100755 --- a/t/t1011-read-tree-sparse-checkout.sh +++ b/t/t1011-read-tree-sparse-checkout.sh @@ -187,11 +187,32 @@ test_expect_success 'read-tree updates worktree, absent case' ' test ! 
-f init.t ' +test_expect_success 'read-tree will not throw away dirty changes, non-sparse' ' + echo "/*" >.git/info/sparse-checkout && + read_tree_u_must_succeed -m -u HEAD && + + echo dirty >init.t && + read_tree_u_must_fail -m -u HEAD^ && + test_path_is_file init.t && + grep -q dirty init.t +' + +test_expect_success 'read-tree will not throw away dirty changes, sparse' ' + echo "/*" >.git/info/sparse-checkout && + read_tree_u_must_succeed -m -u HEAD && + + echo dirty >init.t && + echo sub/added >.git/info/sparse-checkout && + read_tree_u_must_fail -m -u HEAD^ && + test_path_is_file init.t && + grep -q dirty init.t +' + test_expect_success 'read-tree updates worktree, dirty case' ' echo sub/added >.git/info/sparse-checkout && git checkout -f top && echo dirty >init.t && - read_tree_u_must_succeed -m -u HEAD^ && + read_tree_u_must_fail -m -u HEAD^ && grep -q dirty init.t && rm init.t ' diff --git a/t/t1090-sparse-checkout-scope.sh b/t/t1090-sparse-checkout-scope.sh index 3deb490187..d1833c0f31 100755 --- a/t/t1090-sparse-checkout-scope.sh +++ b/t/t1090-sparse-checkout-scope.sh @@ -52,6 +52,25 @@ test_expect_success 'return to full checkout of main' ' test "$(cat b)" = "modified" ' +test_expect_success 'skip-worktree on files outside sparse patterns' ' + git sparse-checkout disable && + git sparse-checkout set --no-cone "a*" && + git checkout-index --all --ignore-skip-worktree-bits && + + git ls-files -t >output && + ! grep ^S output >actual && + test_must_be_empty actual && + + test_config sparse.expectFilesOutsideOfPatterns true && + cat <<-\EOF >expect && + S b + S c + EOF + git ls-files -t >output && + grep ^S output >actual && + test_cmp expect actual +' + test_expect_success 'in partial clone, sparse checkout only fetches needed blobs' ' test_create_repo server && git clone "file://$(pwd)/server" client && diff --git a/t/t1092-sparse-checkout-compatibility.sh b/t/t1092-sparse-checkout-compatibility.sh index f3a059e5af..236ab53028 100755 --- a/t/t1092-sparse-checkout-compatibility.sh +++ b/t/t1092-sparse-checkout-compatibility.sh @@ -16,7 +16,9 @@ test_expect_success 'setup' ' echo "after deep" >e && echo "after folder1" >g && echo "after x" >z && - mkdir folder1 folder2 deep x && + mkdir folder1 folder2 deep before x && + echo "before deep" >before/a && + echo "before deep again" >before/b && mkdir deep/deeper1 deep/deeper2 deep/before deep/later && mkdir deep/deeper1/deepest && mkdir deep/deeper1/deepest2 && @@ -244,6 +246,25 @@ test_expect_success 'expanded in-memory index matches full index' ' test_sparse_match git ls-files --stage ' +test_expect_success 'root directory cannot be sparse' ' + init_repos && + + # Remove all in-cone files and directories from the index, collapse index + # with `git sparse-checkout reapply` + git -C sparse-index rm -r . 
&& + git -C sparse-index sparse-checkout reapply && + + # Verify sparse directories still present, root directory is not sparse + cat >expect <<-EOF && + before/ + folder1/ + folder2/ + x/ + EOF + git -C sparse-index ls-files --sparse >actual && + test_cmp expect actual +' + test_expect_success 'status with options' ' init_repos && test_sparse_match ls && @@ -260,6 +281,13 @@ test_expect_success 'status with options' ' test_all_match git status --porcelain=v2 -uno ' +test_expect_success 'status with diff in unexpanded sparse directory' ' + init_repos && + test_all_match git checkout rename-base && + test_all_match git reset --soft rename-out-to-out && + test_all_match git status --porcelain=v2 +' + test_expect_success 'status reports sparse-checkout' ' init_repos && git -C sparse-checkout status >full && @@ -367,7 +395,7 @@ test_expect_success 'status/add: outside sparse cone' ' write_script edit-contents <<-\EOF && echo text >>$1 EOF - run_on_sparse ../edit-contents folder1/a && + run_on_all ../edit-contents folder1/a && run_on_all ../edit-contents folder1/new && test_sparse_match git status --porcelain=v2 && @@ -376,8 +404,8 @@ test_expect_success 'status/add: outside sparse cone' ' test_sparse_match test_must_fail git add folder1/a && grep "Disable or modify the sparsity rules" sparse-checkout-err && test_sparse_unstaged folder1/a && - test_sparse_match test_must_fail git add --refresh folder1/a && - grep "Disable or modify the sparsity rules" sparse-checkout-err && + test_all_match git add --refresh folder1/a && + test_must_be_empty sparse-checkout-err && test_sparse_unstaged folder1/a && test_sparse_match test_must_fail git add folder1/new && grep "Disable or modify the sparsity rules" sparse-checkout-err && @@ -643,11 +671,11 @@ test_expect_success 'update-index modify outside sparse definition' ' run_on_sparse cp ../initial-repo/folder1/a folder1/a && run_on_all ../edit-contents folder1/a && - # If file has skip-worktree enabled, update-index does not modify the - # index entry - test_sparse_match git update-index folder1/a && - test_sparse_match git status --porcelain=v2 && - test_must_be_empty sparse-checkout-out && + # If file has skip-worktree enabled, but the file is present, it is + # treated the same as if skip-worktree is disabled + test_all_match git status --porcelain=v2 && + test_all_match git update-index folder1/a && + test_all_match git status --porcelain=v2 && # When skip-worktree is disabled (even on files outside sparse cone), file # is updated in the index @@ -794,6 +822,93 @@ test_expect_success 'update-index --cacheinfo' ' test_cmp expect sparse-checkout-out ' +for MERGE_TREES in "base HEAD update-folder2" \ + "update-folder1 update-folder2" \ + "update-folder2" +do + test_expect_success "'read-tree -mu $MERGE_TREES' with files outside sparse definition" ' + init_repos && + + # Although the index matches, without --no-sparse-checkout, outside-of- + # definition files will not exist on disk for sparse checkouts + test_all_match git read-tree -mu $MERGE_TREES && + test_all_match git status --porcelain=v2 && + test_path_is_missing sparse-checkout/folder2 && + test_path_is_missing sparse-index/folder2 && + + test_all_match git read-tree --reset -u HEAD && + test_all_match git status --porcelain=v2 && + + test_all_match git read-tree -mu --no-sparse-checkout $MERGE_TREES && + test_all_match git status --porcelain=v2 && + test_cmp sparse-checkout/folder2/a sparse-index/folder2/a && + test_cmp sparse-checkout/folder2/a full-checkout/folder2/a + + ' +done + 
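A minimal sketch of the behaviour the read-tree loop above exercises, assuming the base/update-folder2 trees and the cone-mode sparse checkout that init_repos sets up (illustration only, run from inside one of the test repositories, not part of the test suite):

    # In a sparse checkout whose cone excludes folder2/, a plain
    # "read-tree -mu" updates the index but leaves out-of-cone paths
    # absent from the worktree ...
    git read-tree -mu base HEAD update-folder2
    test_path_is_missing folder2
    # ... whereas adding --no-sparse-checkout also materializes them.
    git read-tree --reset -u HEAD
    git read-tree -mu --no-sparse-checkout base HEAD update-folder2
    test -f folder2/a
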
+test_expect_success 'read-tree --merge with edit/edit conflicts in sparse directories' ' + init_repos && + + # Merge of multiple changes to same directory (but not same files) should + # succeed + test_all_match git read-tree -mu base rename-base update-folder1 && + test_all_match git status --porcelain=v2 && + + test_all_match git reset --hard && + + test_all_match git read-tree -mu rename-base update-folder2 && + test_all_match git status --porcelain=v2 && + + test_all_match git reset --hard && + + test_all_match test_must_fail git read-tree -mu base update-folder1 rename-out-to-in && + test_all_match test_must_fail git read-tree -mu rename-out-to-in update-folder1 +' + +test_expect_success 'read-tree --prefix' ' + init_repos && + + # If files differing between the index and target <commit-ish> exist + # inside the prefix, `read-tree --prefix` should fail + test_all_match test_must_fail git read-tree --prefix=deep/ deepest && + test_all_match test_must_fail git read-tree --prefix=folder1/ update-folder1 && + + # If no differing index entries exist matching the prefix, + # `read-tree --prefix` updates the index successfully + test_all_match git rm -rf deep/deeper1/deepest/ && + test_all_match git read-tree --prefix=deep/deeper1/deepest -u deepest && + test_all_match git status --porcelain=v2 && + + test_all_match git rm -rf --sparse folder1/ && + test_all_match git read-tree --prefix=folder1/ -u update-folder1 && + test_all_match git status --porcelain=v2 && + + test_all_match git rm -rf --sparse folder2/0 && + test_all_match git read-tree --prefix=folder2/0/ -u rename-out-to-out && + test_all_match git status --porcelain=v2 +' + +test_expect_success 'read-tree --merge with directory-file conflicts' ' + init_repos && + + test_all_match git checkout -b test-branch rename-base && + + # Although the index matches, without --no-sparse-checkout, outside-of- + # definition files will not exist on disk for sparse checkouts + test_sparse_match git read-tree -mu rename-out-to-out && + test_sparse_match git status --porcelain=v2 && + test_path_is_missing sparse-checkout/folder2 && + test_path_is_missing sparse-index/folder2 && + + test_sparse_match git read-tree --reset -u HEAD && + test_sparse_match git status --porcelain=v2 && + + test_sparse_match git read-tree -mu --no-sparse-checkout rename-out-to-out && + test_sparse_match git status --porcelain=v2 && + test_cmp sparse-checkout/folder2/0/1 sparse-index/folder2/0/1 +' + test_expect_success 'merge, cherry-pick, and rebase' ' init_repos && @@ -1297,6 +1412,27 @@ test_expect_success 'sparse index is not expanded: fetch/pull' ' ensure_not_expanded pull full base ' +test_expect_success 'sparse index is not expanded: read-tree' ' + init_repos && + + ensure_not_expanded checkout -b test-branch update-folder1 && + for MERGE_TREES in "base HEAD update-folder2" \ + "base HEAD rename-base" \ + "base update-folder2" \ + "base rename-base" \ + "update-folder2" + do + ensure_not_expanded read-tree -mu $MERGE_TREES && + ensure_not_expanded reset --hard || return 1 + done && + + rm -rf sparse-index/deep/deeper2 && + ensure_not_expanded add . 
&& + ensure_not_expanded commit -m "test" && + + ensure_not_expanded read-tree --prefix=deep/deeper2 -u deepest +' + test_expect_success 'ls-files' ' init_repos && @@ -1311,6 +1447,7 @@ test_expect_success 'ls-files' ' cat >expect <<-\EOF && a + before/ deep/ e folder1- @@ -1331,36 +1468,34 @@ test_expect_success 'ls-files' ' test_cmp dense sparse && # Set up a strange condition of having a file edit - # outside of the sparse-checkout cone. This is just - # to verify that sparse-checkout and sparse-index - # behave the same in this case. + # outside of the sparse-checkout cone. We want to verify + # that all modes handle this the same, and detect the + # modification. write_script edit-content <<-\EOF && - mkdir folder1 && + mkdir -p folder1 && echo content >>folder1/a EOF - run_on_sparse ../edit-content && + run_on_all ../edit-content && - # ls-files does not currently notice modified files whose - # cache entries are marked SKIP_WORKTREE. This may change - # in the future, but here we test that sparse index does - # not accidentally create a change of behavior. - test_sparse_match git ls-files --modified && - test_must_be_empty sparse-checkout-out && - test_must_be_empty sparse-index-out && + test_all_match git ls-files --modified && git -C sparse-index ls-files --sparse --modified >sparse-index-out && - test_must_be_empty sparse-index-out && + cat >expect <<-\EOF && + folder1/a + EOF + test_cmp expect sparse-index-out && # Add folder1 to the sparse-checkout cone and # check that ls-files shows the expanded files. test_sparse_match git sparse-checkout add folder1 && - test_sparse_match git ls-files --modified && + test_all_match git ls-files --modified && test_all_match git ls-files && git -C sparse-index ls-files --sparse >actual && cat >expect <<-\EOF && a + before/ deep/ e folder1- diff --git a/t/t1410-reflog.sh b/t/t1410-reflog.sh index 68f69bb543..ea8e6ac2a0 100755 --- a/t/t1410-reflog.sh +++ b/t/t1410-reflog.sh @@ -423,4 +423,13 @@ test_expect_success 'expire with multiple worktrees' ' ) ' +test_expect_success REFFILES 'empty reflog' ' + test_when_finished "rm -rf empty" && + git init empty && + test_commit -C empty A && + >empty/.git/logs/refs/heads/foo && + git -C empty reflog expire --all 2>err && + test_must_be_empty err +' + test_done diff --git a/t/t1503-rev-parse-verify.sh b/t/t1503-rev-parse-verify.sh index 94fe413ee3..ba43168d12 100755 --- a/t/t1503-rev-parse-verify.sh +++ b/t/t1503-rev-parse-verify.sh @@ -132,8 +132,9 @@ test_expect_success 'use --default' ' test_must_fail git rev-parse --verify --default bar ' -test_expect_success 'main@{n} for various n' ' - N=$(git reflog | wc -l) && +test_expect_success !SANITIZE_LEAK 'main@{n} for various n' ' + git reflog >out && + N=$(wc -l <out) && Nm1=$(($N-1)) && Np1=$(($N+1)) && git rev-parse --verify main@{0} && diff --git a/t/t2012-checkout-last.sh b/t/t2012-checkout-last.sh index 42601d5a31..1f6c4ed042 100755 --- a/t/t2012-checkout-last.sh +++ b/t/t2012-checkout-last.sh @@ -21,14 +21,20 @@ test_expect_success 'first branch switch' ' git checkout other ' +test_cmp_symbolic_HEAD_ref () { + echo refs/heads/"$1" >expect && + git symbolic-ref HEAD >actual && + test_cmp expect actual +} + test_expect_success '"checkout -" switches back' ' git checkout - && - test "z$(git symbolic-ref HEAD)" = "zrefs/heads/main" + test_cmp_symbolic_HEAD_ref main ' test_expect_success '"checkout -" switches forth' ' git checkout - && - test "z$(git symbolic-ref HEAD)" = "zrefs/heads/other" + test_cmp_symbolic_HEAD_ref other ' test_expect_success 
'detach HEAD' ' @@ -37,12 +43,16 @@ test_expect_success 'detach HEAD' ' test_expect_success '"checkout -" attaches again' ' git checkout - && - test "z$(git symbolic-ref HEAD)" = "zrefs/heads/other" + test_cmp_symbolic_HEAD_ref other ' test_expect_success '"checkout -" detaches again' ' git checkout - && - test "z$(git rev-parse HEAD)" = "z$(git rev-parse other)" && + + git rev-parse other >expect && + git rev-parse HEAD >actual && + test_cmp expect actual && + test_must_fail git symbolic-ref HEAD ' @@ -63,31 +73,31 @@ more_switches () { test_expect_success 'switch to the last' ' more_switches && git checkout @{-1} && - test "z$(git symbolic-ref HEAD)" = "zrefs/heads/branch2" + test_cmp_symbolic_HEAD_ref branch2 ' test_expect_success 'switch to second from the last' ' more_switches && git checkout @{-2} && - test "z$(git symbolic-ref HEAD)" = "zrefs/heads/branch3" + test_cmp_symbolic_HEAD_ref branch3 ' test_expect_success 'switch to third from the last' ' more_switches && git checkout @{-3} && - test "z$(git symbolic-ref HEAD)" = "zrefs/heads/branch4" + test_cmp_symbolic_HEAD_ref branch4 ' test_expect_success 'switch to fourth from the last' ' more_switches && git checkout @{-4} && - test "z$(git symbolic-ref HEAD)" = "zrefs/heads/branch5" + test_cmp_symbolic_HEAD_ref branch5 ' test_expect_success 'switch to twelfth from the last' ' more_switches && git checkout @{-12} && - test "z$(git symbolic-ref HEAD)" = "zrefs/heads/branch13" + test_cmp_symbolic_HEAD_ref branch13 ' test_expect_success 'merge base test setup' ' @@ -98,19 +108,28 @@ test_expect_success 'merge base test setup' ' test_expect_success 'another...main' ' git checkout another && git checkout another...main && - test "z$(git rev-parse --verify HEAD)" = "z$(git rev-parse --verify main^)" + + git rev-parse --verify main^ >expect && + git rev-parse --verify HEAD >actual && + test_cmp expect actual ' test_expect_success '...main' ' git checkout another && git checkout ...main && - test "z$(git rev-parse --verify HEAD)" = "z$(git rev-parse --verify main^)" + + git rev-parse --verify main^ >expect && + git rev-parse --verify HEAD >actual && + test_cmp expect actual ' test_expect_success 'main...' ' git checkout another && git checkout main... 
&& - test "z$(git rev-parse --verify HEAD)" = "z$(git rev-parse --verify main^)" + + git rev-parse --verify main^ >expect && + git rev-parse --verify HEAD >actual && + test_cmp expect actual ' test_expect_success '"checkout -" works after a rebase A' ' @@ -118,7 +137,7 @@ test_expect_success '"checkout -" works after a rebase A' ' git checkout other && git rebase main && git checkout - && - test "z$(git symbolic-ref HEAD)" = "zrefs/heads/main" + test_cmp_symbolic_HEAD_ref main ' test_expect_success '"checkout -" works after a rebase A B' ' @@ -127,7 +146,7 @@ test_expect_success '"checkout -" works after a rebase A B' ' git checkout other && git rebase main moodle && git checkout - && - test "z$(git symbolic-ref HEAD)" = "zrefs/heads/main" + test_cmp_symbolic_HEAD_ref main ' test_expect_success '"checkout -" works after a rebase -i A' ' @@ -135,7 +154,7 @@ test_expect_success '"checkout -" works after a rebase -i A' ' git checkout other && git rebase -i main && git checkout - && - test "z$(git symbolic-ref HEAD)" = "zrefs/heads/main" + test_cmp_symbolic_HEAD_ref main ' test_expect_success '"checkout -" works after a rebase -i A B' ' @@ -144,7 +163,7 @@ test_expect_success '"checkout -" works after a rebase -i A B' ' git checkout other && git rebase main foodle && git checkout - && - test "z$(git symbolic-ref HEAD)" = "zrefs/heads/main" + test_cmp_symbolic_HEAD_ref main ' test_done diff --git a/t/t2200-add-update.sh b/t/t2200-add-update.sh index acd3650d3c..0c38f8e356 100755 --- a/t/t2200-add-update.sh +++ b/t/t2200-add-update.sh @@ -14,7 +14,6 @@ only the updates to dir/sub. Also tested are "git add -u" without limiting, and "git add -u" without contents changes, and other conditions' -TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success setup ' @@ -41,20 +40,28 @@ test_expect_success update ' ' test_expect_success 'update noticed a removal' ' - test "$(git ls-files dir1/sub1)" = "" + git ls-files dir1/sub1 >out && + test_must_be_empty out ' test_expect_success 'update touched correct path' ' - test "$(git diff-files --name-status dir2/sub3)" = "" + git diff-files --name-status dir2/sub3 >out && + test_must_be_empty out ' test_expect_success 'update did not touch other tracked files' ' - test "$(git diff-files --name-status check)" = "M check" && - test "$(git diff-files --name-status top)" = "M top" + echo "M check" >expect && + git diff-files --name-status check >actual && + test_cmp expect actual && + + echo "M top" >expect && + git diff-files --name-status top >actual && + test_cmp expect actual ' test_expect_success 'update did not touch untracked files' ' - test "$(git ls-files dir2/other)" = "" + git ls-files dir2/other >out && + test_must_be_empty out ' test_expect_success 'cache tree has not been corrupted' ' @@ -76,9 +83,8 @@ test_expect_success 'update from a subdirectory' ' ' test_expect_success 'change gets noticed' ' - - test "$(git diff-files --name-status dir1)" = "" - + git diff-files --name-status dir1 >out && + test_must_be_empty out ' test_expect_success 'non-qualified update in subdir updates from the root' ' @@ -103,7 +109,8 @@ test_expect_success 'replace a file with a symlink' ' test_expect_success 'add everything changed' ' git add -u && - test -z "$(git diff-files)" + git diff-files >out && + test_must_be_empty out ' @@ -111,7 +118,8 @@ test_expect_success 'touch and then add -u' ' touch check && git add -u && - test -z "$(git diff-files)" + git diff-files >out && + test_must_be_empty out ' @@ -119,7 +127,8 @@ test_expect_success 'touch and then add 
explicitly' ' touch check && git add check && - test -z "$(git diff-files)" + git diff-files >out && + test_must_be_empty out ' diff --git a/t/t3302-notes-index-expensive.sh b/t/t3302-notes-index-expensive.sh index bc9d8ee1e6..bb5fea02a0 100755 --- a/t/t3302-notes-index-expensive.sh +++ b/t/t3302-notes-index-expensive.sh @@ -8,7 +8,6 @@ test_description='Test commit notes index (expensive!)' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME -TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh create_repo () { @@ -65,7 +64,8 @@ create_repo () { test_notes () { count=$1 && git config core.notesRef refs/notes/commits && - git log | grep "^ " >output && + git log >tmp && + grep "^ " tmp >output && i=$count && while test $i -gt 0 do @@ -90,7 +90,7 @@ write_script time_notes <<\EOF unset GIT_NOTES_REF ;; esac - git log + git log || exit $? i=$(($i+1)) done >/dev/null EOF diff --git a/t/t3303-notes-subtrees.sh b/t/t3303-notes-subtrees.sh index 7e0a8960af..eac193757b 100755 --- a/t/t3303-notes-subtrees.sh +++ b/t/t3303-notes-subtrees.sh @@ -5,7 +5,6 @@ test_description='Test commit notes organized in subtrees' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME -TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh number_of_commits=100 @@ -79,7 +78,7 @@ test_sha1_based () { ( start_note_commit && nr=$number_of_commits && - git rev-list refs/heads/main | + git rev-list refs/heads/main >out && while read sha1; do note_path=$(echo "$sha1" | sed "$1") cat <<INPUT_END && @@ -91,9 +90,9 @@ EOF INPUT_END nr=$(($nr-1)) - done - ) | - git fast-import --quiet + done <out + ) >gfi && + git fast-import --quiet <gfi } test_expect_success 'test notes in 2/38-fanout' 'test_sha1_based "s|^..|&/|"' diff --git a/t/t3305-notes-fanout.sh b/t/t3305-notes-fanout.sh index 1f5964865a..9976d787f4 100755 --- a/t/t3305-notes-fanout.sh +++ b/t/t3305-notes-fanout.sh @@ -2,7 +2,6 @@ test_description='Test that adding/removing many notes triggers automatic fanout restructuring' -TEST_PASSES_SANITIZE_LEAK=true . 
./test-lib.sh path_has_fanout() { @@ -24,7 +23,7 @@ touched_one_note_with_fanout() { all_notes_have_fanout() { notes_commit=$1 && fanout=$2 && - git ls-tree -r --name-only $notes_commit 2>/dev/null | + git ls-tree -r --name-only $notes_commit | while read path do path_has_fanout $path $fanout || return 1 @@ -51,8 +50,9 @@ test_expect_success 'creating many notes with git-notes' ' done ' -test_expect_success 'many notes created correctly with git-notes' ' - git log | grep "^ " > output && +test_expect_success !SANITIZE_LEAK 'many notes created correctly with git-notes' ' + git log >output.raw && + grep "^ " output.raw >output && i=$num_notes && while test $i -gt 0 do @@ -91,13 +91,13 @@ test_expect_success 'stable fanout 0 is followed by stable fanout 1' ' test_expect_success 'deleting most notes with git-notes' ' remove_notes=285 && i=0 && - git rev-list HEAD | + git rev-list HEAD >revs && while test $i -lt $remove_notes && read sha1 do i=$(($i + 1)) && test_tick && - git notes remove "$sha1" 2>/dev/null || return 1 - done + git notes remove "$sha1" || return 1 + done <revs ' test_expect_success 'most notes deleted correctly with git-notes' ' diff --git a/t/t3400-rebase.sh b/t/t3400-rebase.sh index 71b1735e1d..d5a8ee39fc 100755 --- a/t/t3400-rebase.sh +++ b/t/t3400-rebase.sh @@ -18,10 +18,7 @@ GIT_AUTHOR_EMAIL=bogus@email@address export GIT_AUTHOR_NAME GIT_AUTHOR_EMAIL test_expect_success 'prepare repository with topic branches' ' - git config core.logAllRefUpdates true && - echo First >A && - git update-index --add A && - git commit -m "Add A." && + test_commit "Add A." A First First && git checkout -b force-3way && echo Dummy >Y && git update-index --add Y && @@ -32,9 +29,7 @@ test_expect_success 'prepare repository with topic branches' ' git mv A D/A && git commit -m "Move A." && git checkout -b my-topic-branch main && - echo Second >B && - git update-index --add B && - git commit -m "Add B." && + test_commit "Add B." 
B Second Second && git checkout -f main && echo Third >>A && git update-index A && @@ -399,6 +394,15 @@ test_expect_success 'switch to branch not checked out' ' git rebase main other ' +test_expect_success 'switch to non-branch detaches HEAD' ' + git checkout main && + old_main=$(git rev-parse HEAD) && + git rebase First Second^0 && + test_cmp_rev HEAD Second && + test_cmp_rev main $old_main && + test_must_fail git symbolic-ref HEAD +' + test_expect_success 'refuse to switch to branch checked out elsewhere' ' git checkout main && git worktree add wt && diff --git a/t/t3705-add-sparse-checkout.sh b/t/t3705-add-sparse-checkout.sh index 81f3384eee..95609046c6 100755 --- a/t/t3705-add-sparse-checkout.sh +++ b/t/t3705-add-sparse-checkout.sh @@ -19,6 +19,7 @@ setup_sparse_entry () { fi && git add sparse_entry && git update-index --skip-worktree sparse_entry && + git config core.sparseCheckout false && git commit --allow-empty -m "ensure sparse_entry exists at HEAD" && SPARSE_ENTRY_BLOB=$(git rev-parse :sparse_entry) } @@ -126,6 +127,7 @@ test_expect_success 'git add --chmod does not update sparse entries' ' ' test_expect_success 'git add --renormalize does not update sparse entries' ' + test_when_finished rm .gitattributes && test_config core.autocrlf false && setup_sparse_entry "LINEONE\r\nLINETWO\r\n" && echo "sparse_entry text=auto" >.gitattributes && diff --git a/t/t3903-stash.sh b/t/t3903-stash.sh index f36e121210..42638b11d8 100755 --- a/t/t3903-stash.sh +++ b/t/t3903-stash.sh @@ -41,7 +41,7 @@ diff_cmp () { rm -f "$1.compare" "$2.compare" } -test_expect_success 'stash some dirty working directory' ' +setup_stash() { echo 1 >file && git add file && echo unrelated >other-file && @@ -55,6 +55,10 @@ test_expect_success 'stash some dirty working directory' ' git stash && git diff-files --quiet && git diff-index --cached --quiet HEAD +} + +test_expect_success 'stash some dirty working directory' ' + setup_stash ' cat >expect <<EOF @@ -185,6 +189,43 @@ test_expect_success 'drop middle stash by index' ' test 1 = $(git show HEAD:file) ' +test_expect_success 'drop stash reflog updates refs/stash' ' + git reset --hard && + git rev-parse refs/stash >expect && + echo 9 >file && + git stash && + git stash drop stash@{0} && + git rev-parse refs/stash >actual && + test_cmp expect actual +' + +test_expect_success REFFILES 'drop stash reflog updates refs/stash with rewrite' ' + git init repo && + ( + cd repo && + setup_stash + ) && + echo 9 >repo/file && + + old_oid="$(git -C repo rev-parse stash@{0})" && + git -C repo stash && + new_oid="$(git -C repo rev-parse stash@{0})" && + + cat >expect <<-EOF && + $(test_oid zero) $old_oid + $old_oid $new_oid + EOF + cut -d" " -f1-2 repo/.git/logs/refs/stash >actual && + test_cmp expect actual && + + git -C repo stash drop stash@{1} && + cut -d" " -f1-2 repo/.git/logs/refs/stash >actual && + cat >expect <<-EOF && + $(test_oid zero) $new_oid + EOF + test_cmp expect actual +' + test_expect_success 'stash pop' ' git reset --hard && git stash pop && diff --git a/t/t4015-diff-whitespace.sh b/t/t4015-diff-whitespace.sh index 9babf13bc9..f3e20dd5bb 100755 --- a/t/t4015-diff-whitespace.sh +++ b/t/t4015-diff-whitespace.sh @@ -6,6 +6,8 @@ test_description='Test special whitespace in diff engine. ' + +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh . 
"$TEST_DIRECTORY"/lib-diff.sh @@ -1622,7 +1624,7 @@ test_expect_success 'cmd option assumes configured colored-moved' ' test_cmp expected actual ' -test_expect_success 'no effect from --color-moved with --word-diff' ' +test_expect_success 'no effect on diff from --color-moved with --word-diff' ' cat <<-\EOF >text.txt && Lorem Ipsum is simply dummy text of the printing and typesetting industry. EOF @@ -1636,6 +1638,12 @@ test_expect_success 'no effect from --color-moved with --word-diff' ' test_cmp expect actual ' +test_expect_success !SANITIZE_LEAK 'no effect on show from --color-moved with --word-diff' ' + git show --color-moved --word-diff >actual && + git show --word-diff >expect && + test_cmp expect actual +' + test_expect_success 'set up whitespace tests' ' git reset --hard && # Note that these lines have no leading or trailing whitespace. @@ -2016,7 +2024,7 @@ test_expect_success '--color-moved rewinds for MIN_ALNUM_COUNT' ' test_cmp expected actual ' -test_expect_success 'move detection with submodules' ' +test_expect_success !SANITIZE_LEAK 'move detection with submodules' ' test_create_repo bananas && echo ripe >bananas/recipe && git -C bananas add recipe && diff --git a/t/t4018/kotlin-class b/t/t4018/kotlin-class new file mode 100644 index 0000000000..bb864f22e6 --- /dev/null +++ b/t/t4018/kotlin-class @@ -0,0 +1,5 @@ +class RIGHT { + //comment + //comment + return ChangeMe +} diff --git a/t/t4018/kotlin-enum-class b/t/t4018/kotlin-enum-class new file mode 100644 index 0000000000..8885f908fd --- /dev/null +++ b/t/t4018/kotlin-enum-class @@ -0,0 +1,5 @@ +enum class RIGHT{ + // Left + // a comment + ChangeMe +} diff --git a/t/t4018/kotlin-fun b/t/t4018/kotlin-fun new file mode 100644 index 0000000000..2a60280256 --- /dev/null +++ b/t/t4018/kotlin-fun @@ -0,0 +1,5 @@ +fun RIGHT(){ + //a comment + //b comment + return ChangeMe() +} diff --git a/t/t4018/kotlin-inheritace-class b/t/t4018/kotlin-inheritace-class new file mode 100644 index 0000000000..77376c1f05 --- /dev/null +++ b/t/t4018/kotlin-inheritace-class @@ -0,0 +1,5 @@ +open class RIGHT{ + // a comment + // b comment + // ChangeMe +} diff --git a/t/t4018/kotlin-inline-class b/t/t4018/kotlin-inline-class new file mode 100644 index 0000000000..7bf46dd8d4 --- /dev/null +++ b/t/t4018/kotlin-inline-class @@ -0,0 +1,5 @@ +value class RIGHT(Args){ + // a comment + // b comment + ChangeMe +} diff --git a/t/t4018/kotlin-interface b/t/t4018/kotlin-interface new file mode 100644 index 0000000000..f686ba7770 --- /dev/null +++ b/t/t4018/kotlin-interface @@ -0,0 +1,5 @@ +interface RIGHT{ + //another comment + //another comment + //ChangeMe +} diff --git a/t/t4018/kotlin-nested-fun b/t/t4018/kotlin-nested-fun new file mode 100644 index 0000000000..12186858cb --- /dev/null +++ b/t/t4018/kotlin-nested-fun @@ -0,0 +1,9 @@ +class LEFT{ + class CENTER{ + fun RIGHT( a:Int){ + //comment + //comment + ChangeMe + } + } +} diff --git a/t/t4018/kotlin-public-class b/t/t4018/kotlin-public-class new file mode 100644 index 0000000000..9433fcc226 --- /dev/null +++ b/t/t4018/kotlin-public-class @@ -0,0 +1,5 @@ +public class RIGHT{ + //comment1 + //comment2 + ChangeMe +} diff --git a/t/t4018/kotlin-sealed-class b/t/t4018/kotlin-sealed-class new file mode 100644 index 0000000000..0efa4a4eaf --- /dev/null +++ b/t/t4018/kotlin-sealed-class @@ -0,0 +1,5 @@ +sealed class RIGHT { + // a comment + // b comment + ChangeMe +} diff --git a/t/t4020-diff-external.sh b/t/t4020-diff-external.sh index 54bb8ef27e..1219f8bd4c 100755 --- a/t/t4020-diff-external.sh +++ 
b/t/t4020-diff-external.sh @@ -24,45 +24,38 @@ test_expect_success setup ' ' test_expect_success 'GIT_EXTERNAL_DIFF environment' ' - - GIT_EXTERNAL_DIFF=echo git diff | { - read path oldfile oldhex oldmode newfile newhex newmode && - test "z$path" = zfile && - test "z$oldmode" = z100644 && - test "z$newhex" = "z$ZERO_OID" && - test "z$newmode" = z100644 && - oh=$(git rev-parse --verify HEAD:file) && - test "z$oh" = "z$oldhex" - } + cat >expect <<-EOF && + file $(git rev-parse --verify HEAD:file) 100644 file $(test_oid zero) 100644 + EOF + GIT_EXTERNAL_DIFF=echo git diff >out && + cut -d" " -f1,3- <out >actual && + test_cmp expect actual ' -test_expect_success 'GIT_EXTERNAL_DIFF environment should apply only to diff' ' - - GIT_EXTERNAL_DIFF=echo git log -p -1 HEAD | - grep "^diff --git a/file b/file" +test_expect_success !SANITIZE_LEAK 'GIT_EXTERNAL_DIFF environment should apply only to diff' ' + GIT_EXTERNAL_DIFF=echo git log -p -1 HEAD >out && + grep "^diff --git a/file b/file" out ' test_expect_success 'GIT_EXTERNAL_DIFF environment and --no-ext-diff' ' - - GIT_EXTERNAL_DIFF=echo git diff --no-ext-diff | - grep "^diff --git a/file b/file" + GIT_EXTERNAL_DIFF=echo git diff --no-ext-diff >out && + grep "^diff --git a/file b/file" out ' test_expect_success SYMLINKS 'typechange diff' ' rm -f file && ln -s elif file && - GIT_EXTERNAL_DIFF=echo git diff | { - read path oldfile oldhex oldmode newfile newhex newmode && - test "z$path" = zfile && - test "z$oldmode" = z100644 && - test "z$newhex" = "z$ZERO_OID" && - test "z$newmode" = z120000 && - oh=$(git rev-parse --verify HEAD:file) && - test "z$oh" = "z$oldhex" - } && + + cat >expect <<-EOF && + file $(git rev-parse --verify HEAD:file) 100644 $(test_oid zero) 120000 + EOF + GIT_EXTERNAL_DIFF=echo git diff >out && + cut -d" " -f1,3-4,6- <out >actual && + test_cmp expect actual && + GIT_EXTERNAL_DIFF=echo git diff --no-ext-diff >actual && git diff >expect && test_cmp expect actual @@ -72,27 +65,25 @@ test_expect_success 'diff.external' ' git reset --hard && echo third >file && test_config diff.external echo && - git diff | { - read path oldfile oldhex oldmode newfile newhex newmode && - test "z$path" = zfile && - test "z$oldmode" = z100644 && - test "z$newhex" = "z$ZERO_OID" && - test "z$newmode" = z100644 && - oh=$(git rev-parse --verify HEAD:file) && - test "z$oh" = "z$oldhex" - } + + cat >expect <<-EOF && + file $(git rev-parse --verify HEAD:file) 100644 $(test_oid zero) 100644 + EOF + git diff >out && + cut -d" " -f1,3-4,6- <out >actual && + test_cmp expect actual ' -test_expect_success 'diff.external should apply only to diff' ' +test_expect_success !SANITIZE_LEAK 'diff.external should apply only to diff' ' test_config diff.external echo && - git log -p -1 HEAD | - grep "^diff --git a/file b/file" + git log -p -1 HEAD >out && + grep "^diff --git a/file b/file" out ' test_expect_success 'diff.external and --no-ext-diff' ' test_config diff.external echo && - git diff --no-ext-diff | - grep "^diff --git a/file b/file" + git diff --no-ext-diff >out && + grep "^diff --git a/file b/file" out ' test_expect_success 'diff attribute' ' @@ -103,29 +94,23 @@ test_expect_success 'diff attribute' ' echo >.gitattributes "file diff=parrot" && - git diff | { - read path oldfile oldhex oldmode newfile newhex newmode && - test "z$path" = zfile && - test "z$oldmode" = z100644 && - test "z$newhex" = "z$ZERO_OID" && - test "z$newmode" = z100644 && - oh=$(git rev-parse --verify HEAD:file) && - test "z$oh" = "z$oldhex" - } - + cat >expect <<-EOF && + file $(git 
rev-parse --verify HEAD:file) 100644 $(test_oid zero) 100644 + EOF + git diff >out && + cut -d" " -f1,3-4,6- <out >actual && + test_cmp expect actual ' -test_expect_success 'diff attribute should apply only to diff' ' - - git log -p -1 HEAD | - grep "^diff --git a/file b/file" +test_expect_success !SANITIZE_LEAK 'diff attribute should apply only to diff' ' + git log -p -1 HEAD >out && + grep "^diff --git a/file b/file" out ' test_expect_success 'diff attribute and --no-ext-diff' ' - - git diff --no-ext-diff | - grep "^diff --git a/file b/file" + git diff --no-ext-diff >out && + grep "^diff --git a/file b/file" out ' @@ -136,48 +121,55 @@ test_expect_success 'diff attribute' ' echo >.gitattributes "file diff=color" && - git diff | { - read path oldfile oldhex oldmode newfile newhex newmode && - test "z$path" = zfile && - test "z$oldmode" = z100644 && - test "z$newhex" = "z$ZERO_OID" && - test "z$newmode" = z100644 && - oh=$(git rev-parse --verify HEAD:file) && - test "z$oh" = "z$oldhex" - } - + cat >expect <<-EOF && + file $(git rev-parse --verify HEAD:file) 100644 $(test_oid zero) 100644 + EOF + git diff >out && + cut -d" " -f1,3-4,6- <out >actual && + test_cmp expect actual ' -test_expect_success 'diff attribute should apply only to diff' ' - - git log -p -1 HEAD | - grep "^diff --git a/file b/file" +test_expect_success !SANITIZE_LEAK 'diff attribute should apply only to diff' ' + git log -p -1 HEAD >out && + grep "^diff --git a/file b/file" out ' test_expect_success 'diff attribute and --no-ext-diff' ' - - git diff --no-ext-diff | - grep "^diff --git a/file b/file" + git diff --no-ext-diff >out && + grep "^diff --git a/file b/file" out ' test_expect_success 'GIT_EXTERNAL_DIFF trumps diff.external' ' >.gitattributes && test_config diff.external "echo ext-global" && - GIT_EXTERNAL_DIFF="echo ext-env" git diff | grep ext-env + + cat >expect <<-EOF && + ext-env file $(git rev-parse --verify HEAD:file) 100644 file $(test_oid zero) 100644 + EOF + GIT_EXTERNAL_DIFF="echo ext-env" git diff >out && + cut -d" " -f1-2,4- <out >actual && + test_cmp expect actual ' test_expect_success 'attributes trump GIT_EXTERNAL_DIFF and diff.external' ' test_config diff.foo.command "echo ext-attribute" && test_config diff.external "echo ext-global" && echo "file diff=foo" >.gitattributes && - GIT_EXTERNAL_DIFF="echo ext-env" git diff | grep ext-attribute + + cat >expect <<-EOF && + ext-attribute file $(git rev-parse --verify HEAD:file) 100644 file $(test_oid zero) 100644 + EOF + GIT_EXTERNAL_DIFF="echo ext-env" git diff >out && + cut -d" " -f1-2,4- <out >actual && + test_cmp expect actual ' test_expect_success 'no diff with -diff' ' echo >.gitattributes "file -diff" && - git diff | grep Binary + git diff >out && + grep Binary out ' echo NULZbetweenZwords | perl -pe 'y/Z/\000/' > file @@ -217,7 +209,12 @@ test_expect_success 'GIT_EXTERNAL_DIFF generates pretty paths' ' touch file.ext && git add file.ext && echo with extension > file.ext && - GIT_EXTERNAL_DIFF=echo git diff file.ext | grep ......_file\.ext && + + cat >expect <<-EOF && + file.ext file $(git rev-parse --verify HEAD:file) 100644 file.ext $(test_oid zero) 100644 + EOF + GIT_EXTERNAL_DIFF=echo git diff file.ext >out && + cut -d" " -f1,3- <out >actual && git update-index --force-remove file.ext && rm file.ext ' diff --git a/t/t4027-diff-submodule.sh b/t/t4027-diff-submodule.sh index 6cef0da982..295da987cc 100755 --- a/t/t4027-diff-submodule.sh +++ b/t/t4027-diff-submodule.sh @@ -2,7 +2,6 @@ test_description='difference in submodules' 
-TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh . "$TEST_DIRECTORY"/lib-diff.sh @@ -28,10 +27,8 @@ test_expect_success setup ' git commit -m "submodule #2" ) && - set x $( - cd sub && - git rev-list HEAD - ) && + git -C sub rev-list HEAD >revs && + set x $(cat revs) && echo ":160000 160000 $3 $ZERO_OID M sub" >expect && subtip=$3 subprev=$2 ' diff --git a/t/t4034-diff-words.sh b/t/t4034-diff-words.sh index d5abcf4b4c..15764ee9ac 100755 --- a/t/t4034-diff-words.sh +++ b/t/t4034-diff-words.sh @@ -324,6 +324,7 @@ test_language_driver dts test_language_driver fortran test_language_driver html test_language_driver java +test_language_driver kotlin test_language_driver matlab test_language_driver objc test_language_driver pascal diff --git a/t/t4034/kotlin/expect b/t/t4034/kotlin/expect new file mode 100644 index 0000000000..7f76f7540d --- /dev/null +++ b/t/t4034/kotlin/expect @@ -0,0 +1,43 @@ +<BOLD>diff --git a/pre b/post<RESET> +<BOLD>index 11ea3de..2e1df4c 100644<RESET> +<BOLD>--- a/pre<RESET> +<BOLD>+++ b/post<RESET> +<CYAN>@@ -1,30 +1,30 @@<RESET> +println("Hello World<RED>!\n<RESET><GREEN>?<RESET>") +<GREEN>(<RESET>1<GREEN>) (<RESET>-1e10<GREEN>) (<RESET>0xabcdef<GREEN>)<RESET> '<RED>x<RESET><GREEN>y<RESET>' +[<RED>a<RESET><GREEN>x<RESET>] <RED>a<RESET><GREEN>x<RESET>-><RED>b a<RESET><GREEN>y x<RESET>.<RED>b<RESET><GREEN>y<RESET> +!<RED>a a<RESET><GREEN>x x<RESET>.inv() <RED>a<RESET><GREEN>x<RESET>*<RED>b a<RESET><GREEN>y x<RESET>&<RED>b<RESET> +<RED>a<RESET><GREEN>y<RESET> +<GREEN>x<RESET>*<RED>b a<RESET><GREEN>y x<RESET>/<RED>b a<RESET><GREEN>y x<RESET>%<RED>b<RESET> +<RED>a<RESET><GREEN>y<RESET> +<GREEN>x<RESET>+<RED>b a<RESET><GREEN>y x<RESET>-<RED>b<RESET><GREEN>y<RESET> +a <RED>shr<RESET><GREEN>shl<RESET> b +<RED>a<RESET><GREEN>x<RESET><<RED>b a<RESET><GREEN>y x<RESET><=<RED>b a<RESET><GREEN>y x<RESET>><RED>b a<RESET><GREEN>y x<RESET>>=<RED>b<RESET> +<RED>a<RESET><GREEN>y<RESET> +<GREEN>x<RESET>==<RED>b a<RESET><GREEN>y x<RESET>!=<RED>b a<RESET><GREEN>y x<RESET>===<RED>b<RESET> +<RED>a<RESET><GREEN>y<RESET> +<GREEN>x<RESET> and <RED>b<RESET> +<RED>a<RESET><GREEN>y<RESET> +<GREEN>x<RESET>^<RED>b<RESET> +<RED>a<RESET><GREEN>y<RESET> +<GREEN>x<RESET> or <RED>b<RESET> +<RED>a<RESET><GREEN>y<RESET> +<GREEN>x<RESET>&&<RED>b a<RESET><GREEN>y x<RESET>||<RED>b<RESET> +<RED>a<RESET><GREEN>y<RESET> +<GREEN>x<RESET>=<RED>b a<RESET><GREEN>y x<RESET>+=<RED>b a<RESET><GREEN>y x<RESET>-=<RED>b a<RESET><GREEN>y x<RESET>*=<RED>b a<RESET><GREEN>y x<RESET>/=<RED>b a<RESET><GREEN>y x<RESET>%=<RED>b a<RESET><GREEN>y x<RESET><<=<RED>b a<RESET><GREEN>y x<RESET>>>=<RED>b a<RESET><GREEN>y x<RESET>&=<RED>b a<RESET><GREEN>y x<RESET>^=<RED>b a<RESET><GREEN>y x<RESET>|=<RED>b<RESET><GREEN>y<RESET> +a<RED>=<RESET><GREEN>+=<RESET>b c<RED>+=<RESET><GREEN>=<RESET>d e<RED>-=<RESET><GREEN><=<RESET>f g<RED>*=<RESET><GREEN>>=<RESET>h i<RED>/=<RESET><GREEN>/<RESET>j k<RED>%=<RESET><GREEN>%<RESET>l m<RED><<=<RESET><GREEN><<<RESET>n o<RED>>>=<RESET><GREEN>>><RESET>p q<RED>&=<RESET><GREEN>&<RESET>r s<RED>^=<RESET><GREEN>^<RESET>t u<RED>|=<RESET><GREEN>|<RESET>v +a<RED><<=<RESET><GREEN><=<RESET>b +a<RED>||<RESET><GREEN>|<RESET>b a<RED>&&<RESET><GREEN>&<RESET>b +<RED>a<RESET><GREEN>x<RESET>,y +--a<RED>==<RESET><GREEN>!=<RESET>--b +a++<RED>==<RESET><GREEN>!=<RESET>++b +<RED>0xFF_EC_DE_5E 0b100_000 100_000<RESET><GREEN>0xFF_E1_DE_5E 0b100_100 200_000<RESET> +a<RED>==<RESET><GREEN>===<RESET>b +a<RED>!!<RESET><GREEN>!=<RESET>b +<RED>_32<RESET><GREEN>_33<RESET>.find(arr) +X.<RED>fill<RESET><GREEN>find<RESET>() 
+X.<RED>u<RESET><GREEN>f<RESET>+1 +X.u<RED>-<RESET><GREEN>+<RESET>2 +a<RED>.<RESET><GREEN>..<RESET>b +a<RED>?.<RESET><GREEN>?:<RESET>b +<RED>.32_00_456<RESET><GREEN>.32_00_446<RESET> diff --git a/t/t4034/kotlin/post b/t/t4034/kotlin/post new file mode 100644 index 0000000000..2e1df4c6d5 --- /dev/null +++ b/t/t4034/kotlin/post @@ -0,0 +1,30 @@ +println("Hello World?") +(1) (-1e10) (0xabcdef) 'y' +[x] x->y x.y +!x x.inv() x*y x&y +x*y x/y x%y +x+y x-y +a shl b +x<y x<=y x>y x>=y +x==y x!=y x===y +x and y +x^y +x or y +x&&y x||y +x=y x+=y x-=y x*=y x/=y x%=y x<<=y x>>=y x&=y x^=y x|=y +a+=b c=d e<=f g>=h i/j k%l m<<n o>>p q&r s^t u|v +a<=b +a|b a&b +x,y +--a!=--b +a++!=++b +0xFF_E1_DE_5E 0b100_100 200_000 +a===b +a!=b +_33.find(arr) +X.find() +X.f+1 +X.u+2 +a..b +a?:b +.32_00_446 diff --git a/t/t4034/kotlin/pre b/t/t4034/kotlin/pre new file mode 100644 index 0000000000..11ea3de665 --- /dev/null +++ b/t/t4034/kotlin/pre @@ -0,0 +1,30 @@ +println("Hello World!\n") +1 -1e10 0xabcdef 'x' +[a] a->b a.b +!a a.inv() a*b a&b +a*b a/b a%b +a+b a-b +a shr b +a<b a<=b a>b a>=b +a==b a!=b a===b +a and b +a^b +a or b +a&&b a||b +a=b a+=b a-=b a*=b a/=b a%=b a<<=b a>>=b a&=b a^=b a|=b +a=b c+=d e-=f g*=h i/=j k%=l m<<=n o>>=p q&=r s^=t u|=v +a<<=b +a||b a&&b +a,y +--a==--b +a++==++b +0xFF_EC_DE_5E 0b100_000 100_000 +a==b +a!!b +_32.find(arr) +X.fill() +X.u+1 +X.u-2 +a.b +a?.b +.32_00_456 diff --git a/t/t4123-apply-shrink.sh b/t/t4123-apply-shrink.sh index dfa053ff28..3ef84619f5 100755 --- a/t/t4123-apply-shrink.sh +++ b/t/t4123-apply-shrink.sh @@ -2,8 +2,6 @@ test_description='apply a patch that is larger than the preimage' - -TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh cat >F <<\EOF @@ -41,20 +39,8 @@ test_expect_success setup ' ' test_expect_success 'apply should fail gracefully' ' - - if git apply --index patch - then - echo Oops, should not have succeeded - false - else - status=$? && - echo "Status was $status" && - if test -f .git/index.lock - then - echo Oops, should not have crashed - false - fi - fi + test_must_fail git apply --index patch && + test_path_is_missing .git/index.lock ' test_done diff --git a/t/t4128-apply-root.sh b/t/t4128-apply-root.sh index cb3181e8b7..f6db5a79dd 100755 --- a/t/t4128-apply-root.sh +++ b/t/t4128-apply-root.sh @@ -2,8 +2,6 @@ test_description='apply same filename' - -TEST_PASSES_SANITIZE_LEAK=true . 
./test-lib.sh test_expect_success 'setup' ' @@ -26,10 +24,11 @@ diff a/bla/blub/dir/file b/bla/blub/dir/file EOF test_expect_success 'apply --directory -p (1)' ' - git apply --directory=some/sub -p3 --index patch && - test Bello = $(git show :some/sub/dir/file) && - test Bello = $(cat some/sub/dir/file) + echo Bello >expect && + git show :some/sub/dir/file >actual && + test_cmp expect actual && + test_cmp expect some/sub/dir/file ' @@ -37,8 +36,10 @@ test_expect_success 'apply --directory -p (2) ' ' git reset --hard initial && git apply --directory=some/sub/ -p3 --index patch && - test Bello = $(git show :some/sub/dir/file) && - test Bello = $(cat some/sub/dir/file) + echo Bello >expect && + git show :some/sub/dir/file >actual && + test_cmp expect actual && + test_cmp expect some/sub/dir/file ' @@ -55,8 +56,10 @@ EOF test_expect_success 'apply --directory (new file)' ' git reset --hard initial && git apply --directory=some/sub/dir/ --index patch && - test content = $(git show :some/sub/dir/newfile) && - test content = $(cat some/sub/dir/newfile) + echo content >expect && + git show :some/sub/dir/newfile >actual && + test_cmp expect actual && + test_cmp expect some/sub/dir/newfile ' cat > patch << EOF @@ -72,8 +75,10 @@ EOF test_expect_success 'apply --directory -p (new file)' ' git reset --hard initial && git apply -p2 --directory=some/sub/dir/ --index patch && - test content = $(git show :some/sub/dir/newfile2) && - test content = $(cat some/sub/dir/newfile2) + echo content >expect && + git show :some/sub/dir/newfile2 >actual && + test_cmp expect actual && + test_cmp expect some/sub/dir/newfile2 ' cat > patch << EOF @@ -91,7 +96,8 @@ test_expect_success 'apply --directory (delete file)' ' echo content >some/sub/dir/delfile && git add some/sub/dir/delfile && git apply --directory=some/sub/dir/ --index patch && - ! (git ls-files | grep delfile) + git ls-files >out && + ! grep delfile out ' cat > patch << 'EOF' @@ -107,8 +113,10 @@ EOF test_expect_success 'apply --directory (quoted filename)' ' git reset --hard initial && git apply --directory=some/sub/dir/ --index patch && - test content = $(git show :some/sub/dir/quotefile) && - test content = $(cat some/sub/dir/quotefile) + echo content >expect && + git show :some/sub/dir/quotefile >actual && + test_cmp expect actual && + test_cmp expect some/sub/dir/quotefile ' test_done diff --git a/t/t4202-log.sh b/t/t4202-log.sh index 55fac64446..be07407f85 100755 --- a/t/t4202-log.sh +++ b/t/t4202-log.sh @@ -484,7 +484,6 @@ do ) ' done -test_done test_expect_success 'log --author' ' cat >expect <<-\EOF && @@ -2037,7 +2036,8 @@ test_expect_success GPGSM 'log --graph --show-signature for merged tag x509 miss git merge --no-ff -m msg signed_tag_x509_nokey && GNUPGHOME=. 
git log --graph --show-signature -n1 plain-x509-nokey >actual && grep "^|\\\ merged tag" actual && - grep "^| | gpgsm: certificate not found" actual + grep -e "^| | gpgsm: certificate not found" \ + -e "^| | gpgsm: failed to find the certificate: Not found" actual ' test_expect_success GPGSM 'log --graph --show-signature for merged tag x509 bad signature' ' diff --git a/t/t4216-log-bloom.sh b/t/t4216-log-bloom.sh index cc3cebf672..fa9d32facf 100755 --- a/t/t4216-log-bloom.sh +++ b/t/t4216-log-bloom.sh @@ -48,6 +48,7 @@ graph_read_expect () { header: 43475048 1 $(test_oid oid_version) $NUM_CHUNKS 0 num_commits: $1 chunks: oid_fanout oid_lookup commit_metadata generation_data bloom_indexes bloom_data + options: bloom(1,10,7) read_generation_data EOF test-tool read-graph >actual && test_cmp expect actual diff --git a/t/t5300-pack-object.sh b/t/t5300-pack-object.sh index 2fd845187e..a11d61206a 100755 --- a/t/t5300-pack-object.sh +++ b/t/t5300-pack-object.sh @@ -315,8 +315,10 @@ test_expect_success \ git index-pack -o tmp.idx test-3.pack && cmp tmp.idx test-1-${packname_1}.idx && - git index-pack test-3.pack && + git index-pack --promisor=message test-3.pack && cmp test-3.idx test-1-${packname_1}.idx && + echo message >expect && + test_cmp expect test-3.promisor && cat test-2-${packname_2}.pack >test-3.pack && git index-pack -o tmp.idx test-2-${packname_2}.pack && diff --git a/t/t5318-commit-graph.sh b/t/t5318-commit-graph.sh index edb728f77c..fbf0d64578 100755 --- a/t/t5318-commit-graph.sh +++ b/t/t5318-commit-graph.sh @@ -29,12 +29,7 @@ test_expect_success 'setup full repo' ' cd "$TRASH_DIRECTORY/full" && git init && git config core.commitGraph true && - objdir=".git/objects" && - - test_oid_cache <<-EOF - oid_version sha1:1 - oid_version sha256:2 - EOF + objdir=".git/objects" ' test_expect_success POSIXPERM 'tweak umask for modebit tests' ' @@ -69,46 +64,10 @@ test_expect_success 'create commits and repack' ' git repack ' -graph_git_two_modes() { - git -c core.commitGraph=true $1 >output && - git -c core.commitGraph=false $1 >expect && - test_cmp expect output -} - -graph_git_behavior() { - MSG=$1 - DIR=$2 - BRANCH=$3 - COMPARE=$4 - test_expect_success "check normal git operations: $MSG" ' - cd "$TRASH_DIRECTORY/$DIR" && - graph_git_two_modes "log --oneline $BRANCH" && - graph_git_two_modes "log --topo-order $BRANCH" && - graph_git_two_modes "log --graph $COMPARE..$BRANCH" && - graph_git_two_modes "branch -vv" && - graph_git_two_modes "merge-base -a $BRANCH $COMPARE" - ' -} +. "$TEST_DIRECTORY"/lib-commit-graph.sh graph_git_behavior 'no graph' full commits/3 commits/1 -graph_read_expect() { - OPTIONAL="" - NUM_CHUNKS=3 - if test ! 
-z "$2" - then - OPTIONAL=" $2" - NUM_CHUNKS=$((3 + $(echo "$2" | wc -w))) - fi - cat >expect <<- EOF - header: 43475048 1 $(test_oid oid_version) $NUM_CHUNKS 0 - num_commits: $1 - chunks: oid_fanout oid_lookup commit_metadata$OPTIONAL - EOF - test-tool read-graph >output && - test_cmp expect output -} - test_expect_success 'exit with correct error on bad input to --stdin-commits' ' cd "$TRASH_DIRECTORY/full" && # invalid, non-hex OID @@ -466,10 +425,10 @@ test_expect_success 'warn on improper hash version' ' ) ' -test_expect_success 'lower layers have overflow chunk' ' +test_expect_success TIME_IS_64BIT,TIME_T_IS_64BIT 'lower layers have overflow chunk' ' cd "$TRASH_DIRECTORY/full" && UNIX_EPOCH_ZERO="@0 +0000" && - FUTURE_DATE="@2147483646 +0000" && + FUTURE_DATE="@4147483646 +0000" && rm -f .git/objects/info/commit-graph && test_commit --date "$FUTURE_DATE" future-1 && test_commit --date "$UNIX_EPOCH_ZERO" old-1 && @@ -497,7 +456,7 @@ test_expect_success 'git commit-graph verify' ' cd "$TRASH_DIRECTORY/full" && git rev-parse commits/8 | git -c commitGraph.generationVersion=1 commit-graph write --stdin-commits && git commit-graph verify >output && - graph_read_expect 9 extra_edges + graph_read_expect 9 extra_edges 1 ' NUM_COMMITS=9 @@ -825,10 +784,6 @@ test_expect_success 'set up and verify repo with generation data overflow chunk' objdir=".git/objects" && UNIX_EPOCH_ZERO="@0 +0000" && FUTURE_DATE="@2147483646 +0000" && - test_oid_cache <<-EOF && - oid_version sha1:1 - oid_version sha256:2 - EOF cd "$TRASH_DIRECTORY" && mkdir repo && cd repo && diff --git a/t/t5324-split-commit-graph.sh b/t/t5324-split-commit-graph.sh index 847b809710..669ddc645f 100755 --- a/t/t5324-split-commit-graph.sh +++ b/t/t5324-split-commit-graph.sh @@ -30,10 +30,16 @@ graph_read_expect() { then NUM_BASE=$2 fi + OPTIONS= + if test -z "$3" + then + OPTIONS=" read_generation_data" + fi cat >expect <<- EOF header: 43475048 1 $(test_oid oid_version) 4 $NUM_BASE num_commits: $1 chunks: oid_fanout oid_lookup commit_metadata generation_data + options:$OPTIONS EOF test-tool read-graph >output && test_cmp expect output @@ -508,6 +514,7 @@ test_expect_success 'setup repo for mixed generation commit-graph-chain' ' header: 43475048 1 $(test_oid oid_version) 4 1 num_commits: $NUM_SECOND_LAYER_COMMITS chunks: oid_fanout oid_lookup commit_metadata + options: EOF test_cmp expect output && git commit-graph verify && @@ -540,6 +547,7 @@ test_expect_success 'do not write generation data chunk if not present on existi header: 43475048 1 $(test_oid oid_version) 4 2 num_commits: $NUM_THIRD_LAYER_COMMITS chunks: oid_fanout oid_lookup commit_metadata + options: EOF test_cmp expect output && git commit-graph verify @@ -581,6 +589,7 @@ test_expect_success 'do not write generation data chunk if the topmost remaining header: 43475048 1 $(test_oid oid_version) 4 2 num_commits: $(($NUM_THIRD_LAYER_COMMITS + $NUM_FOURTH_LAYER_COMMITS)) chunks: oid_fanout oid_lookup commit_metadata + options: EOF test_cmp expect output && git commit-graph verify @@ -620,6 +629,7 @@ test_expect_success 'write generation data chunk if topmost remaining layer has header: 43475048 1 $(test_oid oid_version) 5 1 num_commits: $(($NUM_SECOND_LAYER_COMMITS + $NUM_THIRD_LAYER_COMMITS + $NUM_FOURTH_LAYER_COMMITS + $NUM_FIFTH_LAYER_COMMITS)) chunks: oid_fanout oid_lookup commit_metadata generation_data + options: read_generation_data EOF test_cmp expect output ) diff --git a/t/t5328-commit-graph-64bit-time.sh b/t/t5328-commit-graph-64bit-time.sh new file mode 100755 index 
0000000000..093f0c067a --- /dev/null +++ b/t/t5328-commit-graph-64bit-time.sh @@ -0,0 +1,66 @@ +#!/bin/sh + +test_description='commit graph with 64-bit timestamps' +. ./test-lib.sh + +if ! test_have_prereq TIME_IS_64BIT || ! test_have_prereq TIME_T_IS_64BIT +then + skip_all='skipping 64-bit timestamp tests' + test_done +fi + +. "$TEST_DIRECTORY"/lib-commit-graph.sh + +UNIX_EPOCH_ZERO="@0 +0000" +FUTURE_DATE="@4147483646 +0000" + +GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS=0 + +test_expect_success 'lower layers have overflow chunk' ' + rm -f .git/objects/info/commit-graph && + test_commit --date "$FUTURE_DATE" future-1 && + test_commit --date "$UNIX_EPOCH_ZERO" old-1 && + git commit-graph write --reachable && + test_commit --date "$FUTURE_DATE" future-2 && + test_commit --date "$UNIX_EPOCH_ZERO" old-2 && + git commit-graph write --reachable --split=no-merge && + test_commit extra && + git commit-graph write --reachable --split=no-merge && + git commit-graph write --reachable && + graph_read_expect 5 "generation_data generation_data_overflow" && + mv .git/objects/info/commit-graph commit-graph-upgraded && + git commit-graph write --reachable && + graph_read_expect 5 "generation_data generation_data_overflow" && + test_cmp .git/objects/info/commit-graph commit-graph-upgraded +' + +graph_git_behavior 'overflow' '' HEAD~2 HEAD + +test_expect_success 'set up and verify repo with generation data overflow chunk' ' + mkdir repo && + cd repo && + git init && + test_commit --date "$UNIX_EPOCH_ZERO" 1 && + test_commit 2 && + test_commit --date "$UNIX_EPOCH_ZERO" 3 && + git commit-graph write --reachable && + graph_read_expect 3 generation_data && + test_commit --date "$FUTURE_DATE" 4 && + test_commit 5 && + test_commit --date "$UNIX_EPOCH_ZERO" 6 && + git branch left && + git reset --hard 3 && + test_commit 7 && + test_commit --date "$FUTURE_DATE" 8 && + test_commit 9 && + git branch right && + git reset --hard 3 && + test_merge M left right && + git commit-graph write --reachable && + graph_read_expect 10 "generation_data generation_data_overflow" && + git commit-graph verify +' + +graph_git_behavior 'overflow 2' repo left right + +test_done diff --git a/t/t5503-tagfollow.sh b/t/t5503-tagfollow.sh index 195fc64dd4..a3c01014b7 100755 --- a/t/t5503-tagfollow.sh +++ b/t/t5503-tagfollow.sh @@ -160,4 +160,68 @@ test_expect_success 'new clone fetch main and tags' ' test_cmp expect actual ' +test_expect_success 'atomic fetch with failing backfill' ' + git init clone3 && + + # We want to test whether a failure when backfilling tags correctly + # aborts the complete transaction when `--atomic` is passed: we should + # neither create the branch nor should we create the tag when either + # one of both fails to update correctly. + # + # To trigger failure we simply abort when backfilling a tag. + write_script clone3/.git/hooks/reference-transaction <<-\EOF && + while read oldrev newrev reference + do + if test "$reference" = refs/tags/tag1 + then + exit 1 + fi + done + EOF + + test_must_fail git -C clone3 fetch --atomic .. $B:refs/heads/something && + test_must_fail git -C clone3 rev-parse --verify refs/heads/something && + test_must_fail git -C clone3 rev-parse --verify refs/tags/tag2 +' + +test_expect_success 'atomic fetch with backfill should use single transaction' ' + git init clone4 && + + # Fetching with the `--atomic` flag should update all references in a + # single transaction, including backfilled tags. We thus expect to see + # a single reference transaction for the created branch and tags. 
+ cat >expected <<-EOF && + prepared + $ZERO_OID $B refs/heads/something + $ZERO_OID $S refs/tags/tag2 + $ZERO_OID $T refs/tags/tag1 + committed + $ZERO_OID $B refs/heads/something + $ZERO_OID $S refs/tags/tag2 + $ZERO_OID $T refs/tags/tag1 + EOF + + write_script clone4/.git/hooks/reference-transaction <<-\EOF && + ( echo "$*" && cat ) >>actual + EOF + + git -C clone4 fetch --atomic .. $B:refs/heads/something && + test_cmp expected clone4/actual +' + +test_expect_success 'backfill failure causes command to fail' ' + git init clone5 && + + # Create a tag that is nested below the tag we are about to fetch via + # the backfill mechanism. This causes a D/F conflict when backfilling + # and should thus cause the command to fail. + empty_blob=$(git -C clone5 hash-object -w --stdin </dev/null) && + git -C clone5 update-ref refs/tags/tag1/nested $empty_blob && + + test_must_fail git -C clone5 fetch .. $B:refs/heads/something && + test $B = $(git -C clone5 rev-parse --verify refs/heads/something) && + test $S = $(git -C clone5 rev-parse --verify tag2) && + test_must_fail git -C clone5 rev-parse --verify tag1 +' + test_done diff --git a/t/t5505-remote.sh b/t/t5505-remote.sh index 9ab315424c..c90cf47acd 100755 --- a/t/t5505-remote.sh +++ b/t/t5505-remote.sh @@ -753,7 +753,9 @@ test_expect_success 'rename a remote' ' ( cd four && git config branch.main.pushRemote origin && - git remote rename origin upstream && + GIT_TRACE2_EVENT=$(pwd)/trace \ + git remote rename --progress origin upstream && + test_region progress "Renaming remote references" trace && grep "pushRemote" .git/config && test -z "$(git for-each-ref refs/remotes/origin)" && test "$(git symbolic-ref refs/remotes/upstream/HEAD)" = "refs/remotes/upstream/main" && diff --git a/t/t5510-fetch.sh b/t/t5510-fetch.sh index ef0da0a63b..48e14e2dab 100755 --- a/t/t5510-fetch.sh +++ b/t/t5510-fetch.sh @@ -343,6 +343,35 @@ test_expect_success 'fetch --atomic --append appends to FETCH_HEAD' ' test_cmp expected atomic/.git/FETCH_HEAD ' +test_expect_success 'fetch --atomic --prune executes a single reference transaction only' ' + test_when_finished "rm -rf \"$D\"/atomic" && + + cd "$D" && + git branch scheduled-for-deletion && + git clone . atomic && + git branch -D scheduled-for-deletion && + git branch new-branch && + head_oid=$(git rev-parse HEAD) && + + # Fetching with the `--atomic` flag should update all references in a + # single transaction. 
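(Editorial aside: the 'backfill failure causes command to fail' test above hinges on a directory/file conflict in the ref namespace, where refs/tags/tag1/nested and refs/tags/tag1 cannot coexist. A standalone sketch of that conflict, with a hypothetical repository and tag name:

	git init df-demo
	git -C df-demo commit --allow-empty -m seed
	git -C df-demo update-ref refs/tags/demo/nested HEAD
	# "demo/nested" blocks the plain name "demo", so this is expected to fail,
	# just as backfilling refs/tags/tag1 fails once refs/tags/tag1/nested exists
	git -C df-demo update-ref refs/tags/demo HEAD

The prune test that follows checks the same single-transaction property as the tag-backfill test above, only with a deleted and a newly created remote-tracking branch.)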
+ cat >expected <<-EOF && + prepared + $ZERO_OID $ZERO_OID refs/remotes/origin/scheduled-for-deletion + $ZERO_OID $head_oid refs/remotes/origin/new-branch + committed + $ZERO_OID $ZERO_OID refs/remotes/origin/scheduled-for-deletion + $ZERO_OID $head_oid refs/remotes/origin/new-branch + EOF + + write_script atomic/.git/hooks/reference-transaction <<-\EOF && + ( echo "$*" && cat ) >>actual + EOF + + git -C atomic fetch --atomic --prune origin && + test_cmp expected atomic/actual +' + test_expect_success '--refmap="" ignores configured refspec' ' cd "$TRASH_DIRECTORY" && git clone "$D" remote-refs && diff --git a/t/t5526-fetch-submodules.sh b/t/t5526-fetch-submodules.sh index 840c89cc8b..43dada8544 100755 --- a/t/t5526-fetch-submodules.sh +++ b/t/t5526-fetch-submodules.sh @@ -10,33 +10,122 @@ export GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB pwd=$(pwd) -add_upstream_commit() { +write_expected_sub () { + NEW_HEAD=$1 && + SUPER_HEAD=$2 && + cat >"$pwd/expect.err.sub" <<-EOF + Fetching submodule submodule${SUPER_HEAD:+ at commit $SUPER_HEAD} + From $pwd/submodule + OLD_HEAD..$NEW_HEAD sub -> origin/sub + EOF +} + +write_expected_sub2 () { + NEW_HEAD=$1 && + SUPER_HEAD=$2 && + cat >"$pwd/expect.err.sub2" <<-EOF + Fetching submodule submodule2${SUPER_HEAD:+ at commit $SUPER_HEAD} + From $pwd/submodule2 + OLD_HEAD..$NEW_HEAD sub2 -> origin/sub2 + EOF +} + +write_expected_deep () { + NEW_HEAD=$1 && + SUB_HEAD=$2 && + cat >"$pwd/expect.err.deep" <<-EOF + Fetching submodule submodule/subdir/deepsubmodule${SUB_HEAD:+ at commit $SUB_HEAD} + From $pwd/deepsubmodule + OLD_HEAD..$NEW_HEAD deep -> origin/deep + EOF +} + +write_expected_super () { + NEW_HEAD=$1 && + cat >"$pwd/expect.err.super" <<-EOF + From $pwd/. + OLD_HEAD..$NEW_HEAD super -> origin/super + EOF +} + +# For each submodule in the test setup, this creates a commit and writes +# a file that contains the expected err if that new commit were fetched. +# These output files get concatenated in the right order by +# verify_fetch_result(). +add_submodule_commits () { ( cd submodule && - head1=$(git rev-parse --short HEAD) && echo new >> subfile && test_tick && git add subfile && git commit -m new subfile && - head2=$(git rev-parse --short HEAD) && - echo "Fetching submodule submodule" > ../expect.err && - echo "From $pwd/submodule" >> ../expect.err && - echo " $head1..$head2 sub -> origin/sub" >> ../expect.err + new_head=$(git rev-parse --short HEAD) && + write_expected_sub $new_head ) && ( cd deepsubmodule && - head1=$(git rev-parse --short HEAD) && echo new >> deepsubfile && test_tick && git add deepsubfile && git commit -m new deepsubfile && - head2=$(git rev-parse --short HEAD) && - echo "Fetching submodule submodule/subdir/deepsubmodule" >> ../expect.err - echo "From $pwd/deepsubmodule" >> ../expect.err && - echo " $head1..$head2 deep -> origin/deep" >> ../expect.err + new_head=$(git rev-parse --short HEAD) && + write_expected_deep $new_head ) } +# For each superproject in the test setup, update its submodule, add the +# submodule and create a new commit with the submodule change. +# +# This requires add_submodule_commits() to be called first, otherwise +# the submodules will not have changed and cannot be "git add"-ed. 
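(Editorial aside: taken together, the helpers introduced here, add_submodule_commits, the write_expected_* functions, add_superproject_commits (defined next) and verify_fetch_result, are meant to be combined along these lines; this mirrors the rewritten 'Recursion picks up all submodules when necessary' test further down rather than adding a new case:

	add_submodule_commits &&
	add_superproject_commits &&
	(
		cd downstream &&
		git fetch --recurse-submodules >../actual.out 2>../actual.err
	) &&
	test_must_be_empty actual.out &&
	verify_fetch_result actual.err

A test that expects some repository not to be fetched simply removes the corresponding expect.err.* file before calling verify_fetch_result.)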
+add_superproject_commits () { + ( + cd submodule && + ( + cd subdir/deepsubmodule && + git fetch && + git checkout -q FETCH_HEAD + ) && + git add subdir/deepsubmodule && + git commit -m "new deep submodule" + ) && + git add submodule && + git commit -m "new submodule" && + super_head=$(git rev-parse --short HEAD) && + sub_head=$(git -C submodule rev-parse --short HEAD) && + write_expected_super $super_head && + write_expected_sub $sub_head +} + +# Verifies that the expected repositories were fetched. This is done by +# concatenating the files expect.err.[super|sub|deep] in the correct +# order and comparing it to the actual stderr. +# +# If a repo should not be fetched in the test, its corresponding +# expect.err file should be rm-ed. +verify_fetch_result () { + ACTUAL_ERR=$1 && + rm -f expect.err.combined && + if test -f expect.err.super + then + cat expect.err.super >>expect.err.combined + fi && + if test -f expect.err.sub + then + cat expect.err.sub >>expect.err.combined + fi && + if test -f expect.err.deep + then + cat expect.err.deep >>expect.err.combined + fi && + if test -f expect.err.sub2 + then + cat expect.err.sub2 >>expect.err.combined + fi && + sed -e 's/[0-9a-f][0-9a-f]*\.\./OLD_HEAD\.\./' "$ACTUAL_ERR" >actual.err.cmp && + test_cmp expect.err.combined actual.err.cmp +} + test_expect_success setup ' mkdir deepsubmodule && ( @@ -68,38 +157,38 @@ test_expect_success setup ' ' test_expect_success "fetch --recurse-submodules recurses into submodules" ' - add_upstream_commit && + add_submodule_commits && ( cd downstream && git fetch --recurse-submodules >../actual.out 2>../actual.err ) && test_must_be_empty actual.out && - test_cmp expect.err actual.err + verify_fetch_result actual.err ' test_expect_success "submodule.recurse option triggers recursive fetch" ' - add_upstream_commit && + add_submodule_commits && ( cd downstream && git -c submodule.recurse fetch >../actual.out 2>../actual.err ) && test_must_be_empty actual.out && - test_cmp expect.err actual.err + verify_fetch_result actual.err ' test_expect_success "fetch --recurse-submodules -j2 has the same output behaviour" ' - add_upstream_commit && + add_submodule_commits && ( cd downstream && GIT_TRACE="$TRASH_DIRECTORY/trace.out" git fetch --recurse-submodules -j2 2>../actual.err ) && test_must_be_empty actual.out && - test_cmp expect.err actual.err && + verify_fetch_result actual.err && grep "2 tasks" trace.out ' test_expect_success "fetch alone only fetches superproject" ' - add_upstream_commit && + add_submodule_commits && ( cd downstream && git fetch >../actual.out 2>../actual.err @@ -124,11 +213,11 @@ test_expect_success "using fetchRecurseSubmodules=true in .gitmodules recurses i git fetch >../actual.out 2>../actual.err ) && test_must_be_empty actual.out && - test_cmp expect.err actual.err + verify_fetch_result actual.err ' test_expect_success "--no-recurse-submodules overrides .gitmodules config" ' - add_upstream_commit && + add_submodule_commits && ( cd downstream && git fetch --no-recurse-submodules >../actual.out 2>../actual.err @@ -155,7 +244,7 @@ test_expect_success "--recurse-submodules overrides fetchRecurseSubmodules setti git config --unset submodule.submodule.fetchRecurseSubmodules ) && test_must_be_empty actual.out && - test_cmp expect.err actual.err + verify_fetch_result actual.err ' test_expect_success "--quiet propagates to submodules" ' @@ -177,13 +266,13 @@ test_expect_success "--quiet propagates to parallel submodules" ' ' test_expect_success "--dry-run propagates to submodules" ' - 
add_upstream_commit && + add_submodule_commits && ( cd downstream && git fetch --recurse-submodules --dry-run >../actual.out 2>../actual.err ) && test_must_be_empty actual.out && - test_cmp expect.err actual.err + verify_fetch_result actual.err ' test_expect_success "Without --dry-run propagates to submodules" ' @@ -192,22 +281,22 @@ test_expect_success "Without --dry-run propagates to submodules" ' git fetch --recurse-submodules >../actual.out 2>../actual.err ) && test_must_be_empty actual.out && - test_cmp expect.err actual.err + verify_fetch_result actual.err ' test_expect_success "recurseSubmodules=true propagates into submodules" ' - add_upstream_commit && + add_submodule_commits && ( cd downstream && git config fetch.recurseSubmodules true && git fetch >../actual.out 2>../actual.err ) && test_must_be_empty actual.out && - test_cmp expect.err actual.err + verify_fetch_result actual.err ' test_expect_success "--recurse-submodules overrides config in submodule" ' - add_upstream_commit && + add_submodule_commits && ( cd downstream && ( @@ -217,11 +306,11 @@ test_expect_success "--recurse-submodules overrides config in submodule" ' git fetch --recurse-submodules >../actual.out 2>../actual.err ) && test_must_be_empty actual.out && - test_cmp expect.err actual.err + verify_fetch_result actual.err ' test_expect_success "--no-recurse-submodules overrides config setting" ' - add_upstream_commit && + add_submodule_commits && ( cd downstream && git config fetch.recurseSubmodules true && @@ -246,36 +335,34 @@ test_expect_success "Recursion doesn't happen when no new commits are fetched in ' test_expect_success "Recursion stops when no new submodule commits are fetched" ' - head1=$(git rev-parse --short HEAD) && git add submodule && git commit -m "new submodule" && - head2=$(git rev-parse --short HEAD) && - echo "From $pwd/." > expect.err.sub && - echo " $head1..$head2 super -> origin/super" >>expect.err.sub && - head -3 expect.err >> expect.err.sub && + new_head=$(git rev-parse --short HEAD) && + write_expected_super $new_head && + rm expect.err.deep && ( cd downstream && git fetch >../actual.out 2>../actual.err ) && - test_cmp expect.err.sub actual.err && + verify_fetch_result actual.err && test_must_be_empty actual.out ' test_expect_success "Recursion doesn't happen when new superproject commits don't change any submodules" ' - add_upstream_commit && - head1=$(git rev-parse --short HEAD) && + add_submodule_commits && echo a > file && git add file && git commit -m "new file" && - head2=$(git rev-parse --short HEAD) && - echo "From $pwd/." > expect.err.file && - echo " $head1..$head2 super -> origin/super" >> expect.err.file && + new_head=$(git rev-parse --short HEAD) && + write_expected_super $new_head && + rm expect.err.sub && + rm expect.err.deep && ( cd downstream && git fetch >../actual.out 2>../actual.err ) && test_must_be_empty actual.out && - test_cmp expect.err.file actual.err + verify_fetch_result actual.err ' test_expect_success "Recursion picks up config in submodule" ' @@ -287,14 +374,11 @@ test_expect_success "Recursion picks up config in submodule" ' git config fetch.recurseSubmodules true ) ) && - add_upstream_commit && - head1=$(git rev-parse --short HEAD) && + add_submodule_commits && git add submodule && git commit -m "new submodule" && - head2=$(git rev-parse --short HEAD) && - echo "From $pwd/." 
> expect.err.sub && - echo " $head1..$head2 super -> origin/super" >> expect.err.sub && - cat expect.err >> expect.err.sub && + new_head=$(git rev-parse --short HEAD) && + write_expected_super $new_head && ( cd downstream && git fetch >../actual.out 2>../actual.err && @@ -303,60 +387,23 @@ test_expect_success "Recursion picks up config in submodule" ' git config --unset fetch.recurseSubmodules ) ) && - test_cmp expect.err.sub actual.err && + verify_fetch_result actual.err && test_must_be_empty actual.out ' test_expect_success "Recursion picks up all submodules when necessary" ' - add_upstream_commit && - ( - cd submodule && - ( - cd subdir/deepsubmodule && - git fetch && - git checkout -q FETCH_HEAD - ) && - head1=$(git rev-parse --short HEAD^) && - git add subdir/deepsubmodule && - git commit -m "new deepsubmodule" && - head2=$(git rev-parse --short HEAD) && - echo "Fetching submodule submodule" > ../expect.err.sub && - echo "From $pwd/submodule" >> ../expect.err.sub && - echo " $head1..$head2 sub -> origin/sub" >> ../expect.err.sub - ) && - head1=$(git rev-parse --short HEAD) && - git add submodule && - git commit -m "new submodule" && - head2=$(git rev-parse --short HEAD) && - echo "From $pwd/." > expect.err.2 && - echo " $head1..$head2 super -> origin/super" >> expect.err.2 && - cat expect.err.sub >> expect.err.2 && - tail -3 expect.err >> expect.err.2 && + add_submodule_commits && + add_superproject_commits && ( cd downstream && git fetch >../actual.out 2>../actual.err ) && - test_cmp expect.err.2 actual.err && + verify_fetch_result actual.err && test_must_be_empty actual.out ' test_expect_success "'--recurse-submodules=on-demand' doesn't recurse when no new commits are fetched in the superproject (and ignores config)" ' - add_upstream_commit && - ( - cd submodule && - ( - cd subdir/deepsubmodule && - git fetch && - git checkout -q FETCH_HEAD - ) && - head1=$(git rev-parse --short HEAD^) && - git add subdir/deepsubmodule && - git commit -m "new deepsubmodule" && - head2=$(git rev-parse --short HEAD) && - echo Fetching submodule submodule > ../expect.err.sub && - echo "From $pwd/submodule" >> ../expect.err.sub && - echo " $head1..$head2 sub -> origin/sub" >> ../expect.err.sub - ) && + add_submodule_commits && ( cd downstream && git config fetch.recurseSubmodules true && @@ -368,15 +415,8 @@ test_expect_success "'--recurse-submodules=on-demand' doesn't recurse when no ne ' test_expect_success "'--recurse-submodules=on-demand' recurses as deep as necessary (and ignores config)" ' - head1=$(git rev-parse --short HEAD) && - git add submodule && - git commit -m "new submodule" && - head2=$(git rev-parse --short HEAD) && - tail -3 expect.err > expect.err.deepsub && - echo "From $pwd/." > expect.err && - echo " $head1..$head2 super -> origin/super" >>expect.err && - cat expect.err.sub >> expect.err && - cat expect.err.deepsub >> expect.err && + add_submodule_commits && + add_superproject_commits && ( cd downstream && git config fetch.recurseSubmodules false && @@ -392,24 +432,165 @@ test_expect_success "'--recurse-submodules=on-demand' recurses as deep as necess ) ) && test_must_be_empty actual.out && - test_cmp expect.err actual.err + verify_fetch_result actual.err +' + +# These tests verify that we can fetch submodules that aren't in the +# index. +# +# First, test the simple case where the index is empty and we only fetch +# submodules that are not in the index. 
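(Editorial aside: "submodules in the index" here means gitlink entries, which the superproject's index records with mode 160000. A quick, illustrative way to check a checkout for them from the superproject's top level:

	# list staged gitlink (submodule) entries; no output means the index
	# records no submodules at all
	git ls-files --stage | grep "^160000"

The setup test below builds exactly that situation by removing .gitmodules and the submodule path on a dedicated branch.)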
+test_expect_success 'setup downstream branch without submodules' ' + ( + cd downstream && + git checkout --recurse-submodules -b no-submodules && + git rm .gitmodules && + git rm submodule && + git commit -m "no submodules" && + git checkout --recurse-submodules super + ) +' + +test_expect_success "'--recurse-submodules=on-demand' should fetch submodule commits if the submodule is changed but the index has no submodules" ' + add_submodule_commits && + add_superproject_commits && + # Fetch the new superproject commit + ( + cd downstream && + git switch --recurse-submodules no-submodules && + git fetch --recurse-submodules=on-demand >../actual.out 2>../actual.err + ) && + super_head=$(git rev-parse --short HEAD) && + sub_head=$(git -C submodule rev-parse --short HEAD) && + deep_head=$(git -C submodule/subdir/deepsubmodule rev-parse --short HEAD) && + + # assert that these are fetched from commits, not the index + write_expected_sub $sub_head $super_head && + write_expected_deep $deep_head $sub_head && + + test_must_be_empty actual.out && + verify_fetch_result actual.err +' + +test_expect_success "'--recurse-submodules' should fetch submodule commits if the submodule is changed but the index has no submodules" ' + add_submodule_commits && + add_superproject_commits && + # Fetch the new superproject commit + ( + cd downstream && + git switch --recurse-submodules no-submodules && + git fetch --recurse-submodules >../actual.out 2>../actual.err + ) && + super_head=$(git rev-parse --short HEAD) && + sub_head=$(git -C submodule rev-parse --short HEAD) && + deep_head=$(git -C submodule/subdir/deepsubmodule rev-parse --short HEAD) && + + # assert that these are fetched from commits, not the index + write_expected_sub $sub_head $super_head && + write_expected_deep $deep_head $sub_head && + + test_must_be_empty actual.out && + verify_fetch_result actual.err +' + +test_expect_success "'--recurse-submodules' should ignore changed, inactive submodules" ' + add_submodule_commits && + add_superproject_commits && + + # Fetch the new superproject commit + ( + cd downstream && + git switch --recurse-submodules no-submodules && + git -c submodule.submodule.active=false fetch --recurse-submodules >../actual.out 2>../actual.err + ) && + test_must_be_empty actual.out && + super_head=$(git rev-parse --short HEAD) && + write_expected_super $super_head && + # Neither should be fetched because the submodule is inactive + rm expect.err.sub && + rm expect.err.deep && + verify_fetch_result actual.err +' + +# Now that we know we can fetch submodules that are not in the index, +# test that we can fetch index and non-index submodules in the same +# operation. +test_expect_success 'setup downstream branch with other submodule' ' + mkdir submodule2 && + ( + cd submodule2 && + git init && + echo sub2content >sub2file && + git add sub2file && + git commit -a -m new && + git branch -M sub2 + ) && + git checkout -b super-sub2-only && + git submodule add "$pwd/submodule2" submodule2 && + git commit -m "add sub2" && + git checkout super && + ( + cd downstream && + git fetch --recurse-submodules origin && + git checkout super-sub2-only && + # Explicitly run "git submodule update" because sub2 is new + # and has not been cloned. 
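(Editorial aside on the comment above: a checkout with --recurse-submodules only updates submodules that already have a local clone, so a submodule that first appears in newly fetched history needs one explicit initialization, for example:

	# hypothetical path; after this, --recurse-submodules checkouts keep it updated
	git submodule update --init path/to/new-submodule

which is what the next command does for sub2.)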
+ git submodule update --init && + git checkout --recurse-submodules super + ) +' + +test_expect_success "'--recurse-submodules' should fetch submodule commits in changed submodules and the index" ' + test_when_finished "rm expect.err.sub2" && + # Create new commit in origin/super + add_submodule_commits && + add_superproject_commits && + + # Create new commit in origin/super-sub2-only + git checkout super-sub2-only && + ( + cd submodule2 && + test_commit --no-tag foo + ) && + git add submodule2 && + git commit -m "new submodule2" && + + git checkout super && + ( + cd downstream && + git fetch --recurse-submodules >../actual.out 2>../actual.err + ) && + test_must_be_empty actual.out && + sub2_head=$(git -C submodule2 rev-parse --short HEAD) && + super_head=$(git rev-parse --short super) && + super_sub2_only_head=$(git rev-parse --short super-sub2-only) && + write_expected_sub2 $sub2_head $super_sub2_only_head && + + # write_expected_super cannot handle >1 branch. Since this is a + # one-off, construct expect.err.super manually. + cat >"$pwd/expect.err.super" <<-EOF && + From $pwd/. + OLD_HEAD..$super_head super -> origin/super + OLD_HEAD..$super_sub2_only_head super-sub2-only -> origin/super-sub2-only + EOF + verify_fetch_result actual.err ' test_expect_success "'--recurse-submodules=on-demand' stops when no new submodule commits are found in the superproject (and ignores config)" ' - add_upstream_commit && - head1=$(git rev-parse --short HEAD) && + add_submodule_commits && echo a >> file && git add file && git commit -m "new file" && - head2=$(git rev-parse --short HEAD) && - echo "From $pwd/." > expect.err.file && - echo " $head1..$head2 super -> origin/super" >> expect.err.file && + new_head=$(git rev-parse --short HEAD) && + write_expected_super $new_head && + rm expect.err.sub && + rm expect.err.deep && ( cd downstream && git fetch --recurse-submodules=on-demand >../actual.out 2>../actual.err ) && test_must_be_empty actual.out && - test_cmp expect.err.file actual.err + verify_fetch_result actual.err ' test_expect_success "'fetch.recurseSubmodules=on-demand' overrides global config" ' @@ -417,15 +598,13 @@ test_expect_success "'fetch.recurseSubmodules=on-demand' overrides global config cd downstream && git fetch --recurse-submodules ) && - add_upstream_commit && + add_submodule_commits && git config --global fetch.recurseSubmodules false && - head1=$(git rev-parse --short HEAD) && git add submodule && git commit -m "new submodule" && - head2=$(git rev-parse --short HEAD) && - echo "From $pwd/." 
> expect.err.2 && - echo " $head1..$head2 super -> origin/super" >>expect.err.2 && - head -3 expect.err >> expect.err.2 && + new_head=$(git rev-parse --short HEAD) && + write_expected_super $new_head && + rm expect.err.deep && ( cd downstream && git config fetch.recurseSubmodules on-demand && @@ -437,7 +616,7 @@ test_expect_success "'fetch.recurseSubmodules=on-demand' overrides global config git config --unset fetch.recurseSubmodules ) && test_must_be_empty actual.out && - test_cmp expect.err.2 actual.err + verify_fetch_result actual.err ' test_expect_success "'submodule.<sub>.fetchRecurseSubmodules=on-demand' overrides fetch.recurseSubmodules" ' @@ -445,15 +624,13 @@ test_expect_success "'submodule.<sub>.fetchRecurseSubmodules=on-demand' override cd downstream && git fetch --recurse-submodules ) && - add_upstream_commit && + add_submodule_commits && git config fetch.recurseSubmodules false && - head1=$(git rev-parse --short HEAD) && git add submodule && git commit -m "new submodule" && - head2=$(git rev-parse --short HEAD) && - echo "From $pwd/." > expect.err.2 && - echo " $head1..$head2 super -> origin/super" >>expect.err.2 && - head -3 expect.err >> expect.err.2 && + new_head=$(git rev-parse --short HEAD) && + write_expected_super $new_head && + rm expect.err.deep && ( cd downstream && git config submodule.submodule.fetchRecurseSubmodules on-demand && @@ -465,7 +642,7 @@ test_expect_success "'submodule.<sub>.fetchRecurseSubmodules=on-demand' override git config --unset submodule.submodule.fetchRecurseSubmodules ) && test_must_be_empty actual.out && - test_cmp expect.err.2 actual.err + verify_fetch_result actual.err ' test_expect_success "don't fetch submodule when newly recorded commits are already present" ' @@ -473,18 +650,19 @@ test_expect_success "don't fetch submodule when newly recorded commits are alrea cd submodule && git checkout -q HEAD^^ ) && - head1=$(git rev-parse --short HEAD) && git add submodule && git commit -m "submodule rewound" && - head2=$(git rev-parse --short HEAD) && - echo "From $pwd/." > expect.err && - echo " $head1..$head2 super -> origin/super" >> expect.err && + new_head=$(git rev-parse --short HEAD) && + write_expected_super $new_head && + rm expect.err.sub && + # This file does not exist, but rm -f for readability + rm -f expect.err.deep && ( cd downstream && git fetch >../actual.out 2>../actual.err ) && test_must_be_empty actual.out && - test_cmp expect.err actual.err && + verify_fetch_result actual.err && ( cd submodule && git checkout -q sub @@ -496,15 +674,13 @@ test_expect_success "'fetch.recurseSubmodules=on-demand' works also without .git cd downstream && git fetch --recurse-submodules ) && - add_upstream_commit && - head1=$(git rev-parse --short HEAD) && + add_submodule_commits && git add submodule && git rm .gitmodules && git commit -m "new submodule without .gitmodules" && - head2=$(git rev-parse --short HEAD) && - echo "From $pwd/." 
>expect.err.2 && - echo " $head1..$head2 super -> origin/super" >>expect.err.2 && - head -3 expect.err >>expect.err.2 && + new_head=$(git rev-parse --short HEAD) && + write_expected_super $new_head && + rm expect.err.deep && ( cd downstream && rm .gitmodules && @@ -520,7 +696,7 @@ test_expect_success "'fetch.recurseSubmodules=on-demand' works also without .git git reset --hard ) && test_must_be_empty actual.out && - test_cmp expect.err.2 actual.err && + verify_fetch_result actual.err && git checkout HEAD^ -- .gitmodules && git add .gitmodules && git commit -m "new submodule restored .gitmodules" @@ -842,4 +1018,111 @@ test_expect_success 'recursive fetch after deinit a submodule' ' test_cmp expect actual ' +test_expect_success 'setup repo with upstreams that share a submodule name' ' + mkdir same-name-1 && + ( + cd same-name-1 && + git init -b main && + test_commit --no-tag a + ) && + git clone same-name-1 same-name-2 && + # same-name-1 and same-name-2 both add a submodule with the + # name "submodule" + ( + cd same-name-1 && + mkdir submodule && + git -C submodule init -b main && + test_commit -C submodule --no-tag a1 && + git submodule add "$pwd/same-name-1/submodule" && + git add submodule && + git commit -m "super-a1" + ) && + ( + cd same-name-2 && + mkdir submodule && + git -C submodule init -b main && + test_commit -C submodule --no-tag a2 && + git submodule add "$pwd/same-name-2/submodule" && + git add submodule && + git commit -m "super-a2" + ) && + git clone same-name-1 -o same-name-1 same-name-downstream && + ( + cd same-name-downstream && + git remote add same-name-2 ../same-name-2 && + git fetch --all && + # init downstream with same-name-1 + git submodule update --init + ) +' + +test_expect_success 'fetch --recurse-submodules updates name-conflicted, populated submodule' ' + test_when_finished "git -C same-name-downstream checkout main" && + ( + cd same-name-1 && + test_commit -C submodule --no-tag b1 && + git add submodule && + git commit -m "super-b1" + ) && + ( + cd same-name-2 && + test_commit -C submodule --no-tag b2 && + git add submodule && + git commit -m "super-b2" + ) && + ( + cd same-name-downstream && + # even though the .gitmodules is correct, we cannot + # fetch from same-name-2 + git checkout same-name-2/main && + git fetch --recurse-submodules same-name-1 && + test_must_fail git fetch --recurse-submodules same-name-2 + ) && + super_head1=$(git -C same-name-1 rev-parse HEAD) && + git -C same-name-downstream cat-file -e $super_head1 && + + super_head2=$(git -C same-name-2 rev-parse HEAD) && + git -C same-name-downstream cat-file -e $super_head2 && + + sub_head1=$(git -C same-name-1/submodule rev-parse HEAD) && + git -C same-name-downstream/submodule cat-file -e $sub_head1 && + + sub_head2=$(git -C same-name-2/submodule rev-parse HEAD) && + test_must_fail git -C same-name-downstream/submodule cat-file -e $sub_head2 +' + +test_expect_success 'fetch --recurse-submodules updates name-conflicted, unpopulated submodule' ' + ( + cd same-name-1 && + test_commit -C submodule --no-tag c1 && + git add submodule && + git commit -m "super-c1" + ) && + ( + cd same-name-2 && + test_commit -C submodule --no-tag c2 && + git add submodule && + git commit -m "super-c2" + ) && + ( + cd same-name-downstream && + git checkout main && + git rm .gitmodules && + git rm submodule && + git commit -m "no submodules" && + git fetch --recurse-submodules same-name-1 + ) && + head1=$(git -C same-name-1/submodule rev-parse HEAD) && + head2=$(git -C same-name-2/submodule rev-parse HEAD) && + ( + 
cd same-name-downstream/.git/modules/submodule && + # The submodule has core.worktree pointing to the "git + # rm"-ed directory, overwrite the invalid value. See + # comment in get_fetch_task_from_changed() for more + # information. + git --work-tree=. cat-file -e $head1 && + test_must_fail git --work-tree=. cat-file -e $head2 + ) +' + test_done diff --git a/t/t5537-fetch-shallow.sh b/t/t5537-fetch-shallow.sh index 11d5ea54a9..92948de7a0 100755 --- a/t/t5537-fetch-shallow.sh +++ b/t/t5537-fetch-shallow.sh @@ -161,6 +161,15 @@ test_expect_success 'fetch --update-shallow' ' ) ' +test_expect_success 'fetch --update-shallow into a repo with submodules' ' + git init a-submodule && + test_commit -C a-submodule foo && + git init repo-with-sub && + git -C repo-with-sub submodule add ../a-submodule a-submodule && + git -C repo-with-sub commit -m "added submodule" && + git -C repo-with-sub fetch --update-shallow ../shallow/.git refs/heads/*:refs/remotes/shallow/* +' + test_expect_success 'fetch --update-shallow (with fetch.writeCommitGraph)' ' ( cd shallow && diff --git a/t/t6005-rev-list-count.sh b/t/t6005-rev-list-count.sh index 86542c650e..e960049f64 100755 --- a/t/t6005-rev-list-count.sh +++ b/t/t6005-rev-list-count.sh @@ -2,7 +2,6 @@ test_description='git rev-list --max-count and --skip test' -TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success 'setup' ' @@ -14,39 +13,39 @@ test_expect_success 'setup' ' ' test_expect_success 'no options' ' - test $(git rev-list HEAD | wc -l) = 5 + test_stdout_line_count = 5 git rev-list HEAD ' test_expect_success '--max-count' ' - test $(git rev-list HEAD --max-count=0 | wc -l) = 0 && - test $(git rev-list HEAD --max-count=3 | wc -l) = 3 && - test $(git rev-list HEAD --max-count=5 | wc -l) = 5 && - test $(git rev-list HEAD --max-count=10 | wc -l) = 5 + test_stdout_line_count = 0 git rev-list HEAD --max-count=0 && + test_stdout_line_count = 3 git rev-list HEAD --max-count=3 && + test_stdout_line_count = 5 git rev-list HEAD --max-count=5 && + test_stdout_line_count = 5 git rev-list HEAD --max-count=10 ' test_expect_success '--max-count all forms' ' - test $(git rev-list HEAD --max-count=1 | wc -l) = 1 && - test $(git rev-list HEAD -1 | wc -l) = 1 && - test $(git rev-list HEAD -n1 | wc -l) = 1 && - test $(git rev-list HEAD -n 1 | wc -l) = 1 + test_stdout_line_count = 1 git rev-list HEAD --max-count=1 && + test_stdout_line_count = 1 git rev-list HEAD -1 && + test_stdout_line_count = 1 git rev-list HEAD -n1 && + test_stdout_line_count = 1 git rev-list HEAD -n 1 ' test_expect_success '--skip' ' - test $(git rev-list HEAD --skip=0 | wc -l) = 5 && - test $(git rev-list HEAD --skip=3 | wc -l) = 2 && - test $(git rev-list HEAD --skip=5 | wc -l) = 0 && - test $(git rev-list HEAD --skip=10 | wc -l) = 0 + test_stdout_line_count = 5 git rev-list HEAD --skip=0 && + test_stdout_line_count = 2 git rev-list HEAD --skip=3 && + test_stdout_line_count = 0 git rev-list HEAD --skip=5 && + test_stdout_line_count = 0 git rev-list HEAD --skip=10 ' test_expect_success '--skip --max-count' ' - test $(git rev-list HEAD --skip=0 --max-count=0 | wc -l) = 0 && - test $(git rev-list HEAD --skip=0 --max-count=10 | wc -l) = 5 && - test $(git rev-list HEAD --skip=3 --max-count=0 | wc -l) = 0 && - test $(git rev-list HEAD --skip=3 --max-count=1 | wc -l) = 1 && - test $(git rev-list HEAD --skip=3 --max-count=2 | wc -l) = 2 && - test $(git rev-list HEAD --skip=3 --max-count=10 | wc -l) = 2 && - test $(git rev-list HEAD --skip=5 --max-count=10 | wc -l) = 0 && - test $(git rev-list HEAD 
--skip=10 --max-count=10 | wc -l) = 0 + test_stdout_line_count = 0 git rev-list HEAD --skip=0 --max-count=0 && + test_stdout_line_count = 5 git rev-list HEAD --skip=0 --max-count=10 && + test_stdout_line_count = 0 git rev-list HEAD --skip=3 --max-count=0 && + test_stdout_line_count = 1 git rev-list HEAD --skip=3 --max-count=1 && + test_stdout_line_count = 2 git rev-list HEAD --skip=3 --max-count=2 && + test_stdout_line_count = 2 git rev-list HEAD --skip=3 --max-count=10 && + test_stdout_line_count = 0 git rev-list HEAD --skip=5 --max-count=10 && + test_stdout_line_count = 0 git rev-list HEAD --skip=10 --max-count=10 ' test_done diff --git a/t/t6012-rev-list-simplify.sh b/t/t6012-rev-list-simplify.sh index 63fcccec32..de1e87f162 100755 --- a/t/t6012-rev-list-simplify.sh +++ b/t/t6012-rev-list-simplify.sh @@ -12,7 +12,9 @@ note () { } unnote () { - git name-rev --tags --annotate-stdin | sed -e "s|$OID_REGEX (tags/\([^)]*\)) |\1 |g" + test_when_finished "rm -f tmp" && + git name-rev --tags --annotate-stdin >tmp && + sed -e "s|$OID_REGEX (tags/\([^)]*\)) |\1 |g" <tmp } # @@ -111,8 +113,8 @@ check_outcome () { shift && param="$*" && test_expect_$outcome "log $param" ' - git log --pretty="$FMT" --parents $param | - unnote >actual && + git log --pretty="$FMT" --parents $param >out && + unnote >actual <out && sed -e "s/^.* \([^ ]*\) .*/\1/" >check <actual && test_cmp expect check ' @@ -151,8 +153,8 @@ check_result 'L K I H G B' --exclude-first-parent-only --first-parent L ^F check_result 'E C B A' --full-history E -- lost test_expect_success 'full history simplification without parent' ' printf "%s\n" E C B A >expect && - git log --pretty="$FMT" --full-history E -- lost | - unnote >actual && + git log --pretty="$FMT" --full-history E -- lost >out && + unnote >actual <out && sed -e "s/^.* \([^ ]*\) .*/\1/" >check <actual && test_cmp expect check ' diff --git a/t/t6020-bundle-misc.sh b/t/t6020-bundle-misc.sh index b13e8a52a9..ed95d19542 100755 --- a/t/t6020-bundle-misc.sh +++ b/t/t6020-bundle-misc.sh @@ -475,4 +475,78 @@ test_expect_success 'clone from bundle' ' test_cmp expect actual ' +test_expect_success 'unfiltered bundle with --objects' ' + git bundle create all-objects.bdl \ + --all --objects && + git bundle create all.bdl \ + --all && + + # Compare the headers of these files. + sed -n -e "/^$/q" -e "p" all.bdl >expect && + sed -n -e "/^$/q" -e "p" all-objects.bdl >actual && + test_cmp expect actual +' + +for filter in "blob:none" "tree:0" "tree:1" "blob:limit=100" +do + test_expect_success "filtered bundle: $filter" ' + test_when_finished rm -rf .git/objects/pack cloned unbundled && + git bundle create partial.bdl \ + --all \ + --filter=$filter && + + git bundle verify partial.bdl >unfiltered && + make_user_friendly_and_stable_output <unfiltered >actual && + + cat >expect <<-EOF && + The bundle contains these 10 refs: + <COMMIT-P> refs/heads/main + <COMMIT-N> refs/heads/release + <COMMIT-D> refs/heads/topic/1 + <COMMIT-H> refs/heads/topic/2 + <COMMIT-D> refs/pull/1/head + <COMMIT-G> refs/pull/2/head + <TAG-1> refs/tags/v1 + <TAG-2> refs/tags/v2 + <TAG-3> refs/tags/v3 + <COMMIT-P> HEAD + The bundle uses this filter: $filter + The bundle records a complete history. 
+ EOF + test_cmp expect actual && + + test_config uploadpack.allowfilter 1 && + test_config uploadpack.allowanysha1inwant 1 && + git clone --no-local --filter=$filter --bare "file://$(pwd)" cloned && + + git init unbundled && + git -C unbundled bundle unbundle ../partial.bdl >ref-list.txt && + ls unbundled/.git/objects/pack/pack-*.promisor >promisor && + test_line_count = 1 promisor && + + # Count the same number of reachable objects. + reflist=$(git for-each-ref --format="%(objectname)") && + git rev-list --objects --filter=$filter --missing=allow-any \ + $reflist >expect && + for repo in cloned unbundled + do + git -C $repo rev-list --objects --missing=allow-any \ + $reflist >actual && + test_cmp expect actual || return 1 + done + ' +done + +# NEEDSWORK: 'git clone --bare' should be able to clone from a filtered +# bundle, but that requires a change to promisor/filter config options. +# For now, we fail gracefully with a helpful error. This behavior can be +# changed in the future to succeed as much as possible. +test_expect_success 'cloning from filtered bundle has useful error' ' + git bundle create partial.bdl \ + --all \ + --filter=blob:none && + test_must_fail git clone --bare partial.bdl partial 2>err && + grep "cannot clone from filtered bundle" err +' + test_done diff --git a/t/t6102-rev-list-unexpected-objects.sh b/t/t6102-rev-list-unexpected-objects.sh index 6f0902b863..cf0195e826 100755 --- a/t/t6102-rev-list-unexpected-objects.sh +++ b/t/t6102-rev-list-unexpected-objects.sh @@ -17,8 +17,13 @@ test_expect_success 'setup unexpected non-blob entry' ' broken_tree="$(git hash-object -w --literally -t tree broken-tree)" ' -test_expect_failure 'traverse unexpected non-blob entry (lone)' ' - test_must_fail git rev-list --objects $broken_tree +test_expect_success !SANITIZE_LEAK 'TODO (should fail!): traverse unexpected non-blob entry (lone)' ' + sed "s/Z$//" >expect <<-EOF && + $broken_tree Z + $tree foo + EOF + git rev-list --objects $broken_tree >actual && + test_cmp expect actual ' test_expect_success 'traverse unexpected non-blob entry (seen)' ' @@ -116,8 +121,8 @@ test_expect_success 'setup unexpected non-blob tag' ' tag=$(git hash-object -w --literally -t tag broken-tag) ' -test_expect_failure 'traverse unexpected non-blob tag (lone)' ' - test_must_fail git rev-list --objects $tag +test_expect_success !SANITIZE_LEAK 'TODO (should fail!): traverse unexpected non-blob tag (lone)' ' + git rev-list --objects $tag ' test_expect_success 'traverse unexpected non-blob tag (seen)' ' diff --git a/t/t6120-describe.sh b/t/t6120-describe.sh index 9781b92aed..9a35e783a7 100755 --- a/t/t6120-describe.sh +++ b/t/t6120-describe.sh @@ -488,6 +488,124 @@ test_expect_success 'name-rev covers all conditions while looking at parents' ' ) ' +# A-B-C-D-E-main +# +# Where C has a non-monotonically increasing commit timestamp w.r.t. 
other +# commits +test_expect_success 'non-monotonic commit dates setup' ' + UNIX_EPOCH_ZERO="@0 +0000" && + git init non-monotonic && + test_commit -C non-monotonic A && + test_commit -C non-monotonic --no-tag B && + test_commit -C non-monotonic --no-tag --date "$UNIX_EPOCH_ZERO" C && + test_commit -C non-monotonic D && + test_commit -C non-monotonic E +' + +test_expect_success 'name-rev with commitGraph handles non-monotonic timestamps' ' + test_config -C non-monotonic core.commitGraph true && + ( + cd non-monotonic && + + git commit-graph write --reachable && + + echo "main~3 tags/D~2" >expect && + git name-rev --tags main~3 >actual && + + test_cmp expect actual + ) +' + +test_expect_success 'name-rev --all works with non-monotonic timestamps' ' + test_config -C non-monotonic core.commitGraph false && + ( + cd non-monotonic && + + rm -rf .git/info/commit-graph* && + + cat >tags <<-\EOF && + tags/E + tags/D + tags/D~1 + tags/D~2 + tags/A + EOF + + git log --pretty=%H >revs && + + paste -d" " revs tags | sort >expect && + + git name-rev --tags --all | sort >actual && + test_cmp expect actual + ) +' + +test_expect_success 'name-rev --annotate-stdin works with non-monotonic timestamps' ' + test_config -C non-monotonic core.commitGraph false && + ( + cd non-monotonic && + + rm -rf .git/info/commit-graph* && + + cat >expect <<-\EOF && + E + D + D~1 + D~2 + A + EOF + + git log --pretty=%H >revs && + git name-rev --tags --annotate-stdin --name-only <revs >actual && + test_cmp expect actual + ) +' + +test_expect_success 'name-rev --all works with commitGraph' ' + test_config -C non-monotonic core.commitGraph true && + ( + cd non-monotonic && + + git commit-graph write --reachable && + + cat >tags <<-\EOF && + tags/E + tags/D + tags/D~1 + tags/D~2 + tags/A + EOF + + git log --pretty=%H >revs && + + paste -d" " revs tags | sort >expect && + + git name-rev --tags --all | sort >actual && + test_cmp expect actual + ) +' + +test_expect_success 'name-rev --annotate-stdin works with commitGraph' ' + test_config -C non-monotonic core.commitGraph true && + ( + cd non-monotonic && + + git commit-graph write --reachable && + + cat >expect <<-\EOF && + E + D + D~1 + D~2 + A + EOF + + git log --pretty=%H >revs && + git name-rev --tags --annotate-stdin --name-only <revs >actual && + test_cmp expect actual + ) +' + # B # o # \ diff --git a/t/t6407-merge-binary.sh b/t/t6407-merge-binary.sh index 8e6241f92e..0753fc95f4 100755 --- a/t/t6407-merge-binary.sh +++ b/t/t6407-merge-binary.sh @@ -43,14 +43,9 @@ test_expect_success resolve ' rm -f a* m* && git reset --hard anchor && - if git merge -s resolve main - then - echo Oops, should not have succeeded - false - else - git ls-files -s >current && - test_cmp expect current - fi + test_must_fail git merge -s resolve main && + git ls-files -s >current && + test_cmp expect current ' test_expect_success recursive ' @@ -58,14 +53,9 @@ test_expect_success recursive ' rm -f a* m* && git reset --hard anchor && - if git merge -s recursive main - then - echo Oops, should not have succeeded - false - else - git ls-files -s >current && - test_cmp expect current - fi + test_must_fail git merge -s recursive main && + git ls-files -s >current && + test_cmp expect current ' test_done diff --git a/t/t6423-merge-rename-directories.sh b/t/t6423-merge-rename-directories.sh index 5b81a130e9..479db32cd6 100755 --- a/t/t6423-merge-rename-directories.sh +++ b/t/t6423-merge-rename-directories.sh @@ -4421,14 +4421,14 @@ test_setup_12c1 () { git checkout A && git mv node2/ node1/ && - for i in 
`git ls-files`; do echo side A >>$i; done && + for i in $(git ls-files); do echo side A >>$i; done && git add -u && test_tick && git commit -m "A" && git checkout B && git mv node1/ node2/ && - for i in `git ls-files`; do echo side B >>$i; done && + for i in $(git ls-files); do echo side B >>$i; done && git add -u && test_tick && git commit -m "B" @@ -4511,7 +4511,7 @@ test_setup_12c2 () { git checkout A && git mv node2/ node1/ && - for i in `git ls-files`; do echo side A >>$i; done && + for i in $(git ls-files); do echo side A >>$i; done && git add -u && echo leaf5 >node1/leaf5 && git add node1/leaf5 && @@ -4520,7 +4520,7 @@ test_setup_12c2 () { git checkout B && git mv node1/ node2/ && - for i in `git ls-files`; do echo side B >>$i; done && + for i in $(git ls-files); do echo side B >>$i; done && git add -u && echo leaf6 >node2/leaf6 && git add node2/leaf6 && @@ -4759,7 +4759,7 @@ test_setup_12f () { echo g >dir/subdir/tweaked/g && echo h >dir/subdir/tweaked/h && test_seq 20 30 >dir/subdir/tweaked/Makefile && - for i in `test_seq 1 88`; do + for i in $(test_seq 1 88); do echo content $i >dir/unchanged/file_$i done && git add . && diff --git a/t/t6428-merge-conflicts-sparse.sh b/t/t6428-merge-conflicts-sparse.sh index 7e8bf497f8..142c9aaabc 100755 --- a/t/t6428-merge-conflicts-sparse.sh +++ b/t/t6428-merge-conflicts-sparse.sh @@ -112,7 +112,7 @@ test_expect_success 'conflicting entries written to worktree even if sparse' ' ) ' -test_expect_merge_algorithm failure success 'present-despite-SKIP_WORKTREE handled reasonably' ' +test_expect_success 'present-despite-SKIP_WORKTREE handled reasonably' ' test_setup_numerals in_the_way && ( cd numerals_in_the_way && @@ -132,26 +132,13 @@ test_expect_merge_algorithm failure success 'present-despite-SKIP_WORKTREE handl test_must_fail git merge -s recursive B^0 && - git ls-files -t >index_files && - test_cmp expected-index index_files && + test_path_is_missing .git/MERGE_HEAD && - test_path_is_file README && test_path_is_file numerals && - test_cmp expected-merge numerals && - - # There should still be a file with "foobar" in it - grep foobar * && - - # 5 other files: - # * expected-merge - # * expected-index - # * index_files - # * others - # * whatever name was given to the numerals file that had - # "foobar" in it - git ls-files -o >others && - test_line_count = 5 others + # numerals should still have "foobar" in it + echo foobar >expect && + test_cmp expect numerals ) ' diff --git a/t/t7012-skip-worktree-writing.sh b/t/t7012-skip-worktree-writing.sh index a1080b94e3..cb9f1a6981 100755 --- a/t/t7012-skip-worktree-writing.sh +++ b/t/t7012-skip-worktree-writing.sh @@ -171,50 +171,20 @@ test_expect_success 'stash restore in sparse checkout' ' # Put a file in the working directory in the way echo in the way >modified && - git stash apply && + test_must_fail git stash apply 2>error&& - # Ensure stash vivifies modifies paths... - cat >expect <<-EOF && - H addme - H modified - H removeme - H subdir/A - S untouched - EOF - git ls-files -t >actual && - test_cmp expect actual && + grep "changes.*would be overwritten by merge" error && - # ...and that the paths show up in status as changed... - cat >expect <<-EOF && - A addme - M modified - D removeme - M subdir/A - ?? actual - ?? expect - ?? 
modified.stash.XXXXXX - EOF - git status --porcelain | \ - sed -e s/stash......./stash.XXXXXX/ >actual && - test_cmp expect actual && + echo in the way >expect && + test_cmp expect modified && + git diff --quiet HEAD ":!modified" && # ...and that working directory reflects the files correctly - test_path_is_file addme && + test_path_is_missing addme && test_path_is_file modified && test_path_is_missing removeme && test_path_is_file subdir/A && - test_path_is_missing untouched && - - # ...including that we have the expected "modified" file... - cat >expect <<-EOF && - modified - tweaked - EOF - test_cmp expect modified && - - # ...and that the other "modified" file is still present... - echo in the way >expect && - test_cmp expect modified.stash.* + test_path_is_missing untouched ) ' diff --git a/t/t7063-status-untracked-cache.sh b/t/t7063-status-untracked-cache.sh index a0c123b0a7..ca90ee805e 100755 --- a/t/t7063-status-untracked-cache.sh +++ b/t/t7063-status-untracked-cache.sh @@ -90,6 +90,9 @@ test_expect_success 'setup' ' cd worktree && mkdir done dtwo dthree && touch one two three done/one dtwo/two dthree/three && + test-tool chmtime =-300 one two three done/one dtwo/two dthree/three && + test-tool chmtime =-300 done dtwo dthree && + test-tool chmtime =-300 . && git add one two done/one && : >.git/info/exclude && git update-index --untracked-cache && @@ -142,7 +145,6 @@ two EOF test_expect_success 'status first time (empty cache)' ' - avoid_racy && : >../trace.output && GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \ git status --porcelain >../actual && @@ -166,7 +168,6 @@ test_expect_success 'untracked cache after first status' ' ' test_expect_success 'status second time (fully populated cache)' ' - avoid_racy && : >../trace.output && GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \ git status --porcelain >../actual && @@ -190,8 +191,8 @@ test_expect_success 'untracked cache after second status' ' ' test_expect_success 'modify in root directory, one dir invalidation' ' - avoid_racy && : >four && + test-tool chmtime =-240 four && : >../trace.output && GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \ git status --porcelain >../actual && @@ -241,7 +242,6 @@ EOF ' test_expect_success 'new .gitignore invalidates recursively' ' - avoid_racy && echo four >.gitignore && : >../trace.output && GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \ @@ -292,7 +292,6 @@ EOF ' test_expect_success 'new info/exclude invalidates everything' ' - avoid_racy && echo three >>.git/info/exclude && : >../trace.output && GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \ @@ -520,14 +519,14 @@ test_expect_success 'create/modify files, some of which are gitignored' ' echo three >done/three && # three is gitignored echo four >done/four && # four is gitignored at a higher level echo five >done/five && # five is not gitignored - echo test >base && #we need to ensure that the root dir is touched - rm base && + test-tool chmtime =-180 done/two done/three done/four done/five done && + # we need to ensure that the root dir is touched (in the past); + test-tool chmtime =-180 . 
&& sync_mtime ' test_expect_success 'test sparse status with untracked cache' ' : >../trace.output && - avoid_racy && GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \ git status --porcelain >../status.actual && iuc status --porcelain >../status.iuc && @@ -570,7 +569,6 @@ EOF ' test_expect_success 'test sparse status again with untracked cache' ' - avoid_racy && : >../trace.output && GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \ git status --porcelain >../status.actual && @@ -597,11 +595,11 @@ EOF test_expect_success 'set up for test of subdir and sparse checkouts' ' mkdir done/sub && mkdir done/sub/sub && - echo "sub" > done/sub/sub/file + echo "sub" > done/sub/sub/file && + test-tool chmtime =-120 done/sub/sub/file done/sub/sub done/sub done ' test_expect_success 'test sparse status with untracked cache and subdir' ' - avoid_racy && : >../trace.output && GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \ git status --porcelain >../status.actual && @@ -651,7 +649,6 @@ EOF ' test_expect_success 'test sparse status again with untracked cache and subdir' ' - avoid_racy && : >../trace.output && GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace.output" \ git status --porcelain >../status.actual && diff --git a/t/t7103-reset-bare.sh b/t/t7103-reset-bare.sh index 0de83e3619..a60153f9f3 100755 --- a/t/t7103-reset-bare.sh +++ b/t/t7103-reset-bare.sh @@ -63,9 +63,12 @@ test_expect_success '"mixed" reset is not allowed in bare' ' test_must_fail git reset --mixed HEAD^ ' -test_expect_success '"soft" reset is allowed in bare' ' +test_expect_success !SANITIZE_LEAK '"soft" reset is allowed in bare' ' git reset --soft HEAD^ && - test "$(git show --pretty=format:%s | head -n 1)" = "one" + git show --pretty=format:%s >out && + echo one >expect && + head -n 1 out >actual && + test_cmp expect actual ' test_done diff --git a/t/t7406-submodule-update.sh b/t/t7406-submodule-update.sh index 11cccbb333..000e055811 100755 --- a/t/t7406-submodule-update.sh +++ b/t/t7406-submodule-update.sh @@ -205,8 +205,18 @@ test_expect_success 'submodule update should fail due to local changes' ' (cd submodule && compare_head ) && - test_must_fail git submodule update submodule - ) + test_must_fail git submodule update submodule 2>../actual.raw + ) && + sed "s/^> //" >expect <<-\EOF && + > error: Your local changes to the following files would be overwritten by checkout: + > file + > Please commit your changes or stash them before you switch branches. 
+ > Aborting + > fatal: Unable to checkout OID in submodule path '\''submodule'\'' + EOF + sed -e "s/checkout $SQ[^$SQ]*$SQ/checkout OID/" <actual.raw >actual && + test_cmp expect actual + ' test_expect_success 'submodule update should throw away changes with --force ' ' (cd super && @@ -1061,4 +1071,16 @@ test_expect_success 'submodule update --quiet passes quietness to fetch with a s ) ' +test_expect_success 'submodule update --filter requires --init' ' + test_expect_code 129 git -C super submodule update --filter blob:none +' + +test_expect_success 'submodule update --filter sets partial clone settings' ' + test_when_finished "rm -rf super-filter" && + git clone cloned super-filter && + git -C super-filter submodule update --init --filter blob:none && + test_cmp_config -C super-filter/submodule true remote.origin.promisor && + test_cmp_config -C super-filter/submodule blob:none remote.origin.partialclonefilter +' + test_done diff --git a/t/t7408-submodule-reference.sh b/t/t7408-submodule-reference.sh index a3892f494b..c3a4545510 100755 --- a/t/t7408-submodule-reference.sh +++ b/t/t7408-submodule-reference.sh @@ -193,7 +193,19 @@ test_expect_success 'missing nested submodule alternate fails clone and submodul cd supersuper-clone && check_that_two_of_three_alternates_are_used && # update of the submodule fails - test_must_fail git submodule update --init --recursive + cat >expect <<-\EOF && + fatal: submodule '\''sub'\'' cannot add alternate: path ... does not exist + Failed to clone '\''sub'\''. Retry scheduled + fatal: submodule '\''sub-dissociate'\'' cannot add alternate: path ... does not exist + Failed to clone '\''sub-dissociate'\''. Retry scheduled + fatal: submodule '\''sub'\'' cannot add alternate: path ... does not exist + Failed to clone '\''sub'\'' a second time, aborting + fatal: Failed to recurse into submodule path ... 
+ EOF + test_must_fail git submodule update --init --recursive 2>err && + grep -e fatal: -e ^Failed err >actual.raw && + sed -e "s/path $SQ[^$SQ]*$SQ/path .../" <actual.raw >actual && + test_cmp expect actual ) ' diff --git a/t/t7519-status-fsmonitor.sh b/t/t7519-status-fsmonitor.sh index a6308acf00..fffc57120d 100755 --- a/t/t7519-status-fsmonitor.sh +++ b/t/t7519-status-fsmonitor.sh @@ -324,17 +324,24 @@ test_expect_success UNTRACKED_CACHE 'ignore .git changes when invalidating UNTR' cd dot-git && mkdir -p .git/hooks && : >tracked && + test-tool chmtime =-60 tracked && : >modified && + test-tool chmtime =-60 modified && mkdir dir1 && : >dir1/tracked && + test-tool chmtime =-60 dir1/tracked && : >dir1/modified && + test-tool chmtime =-60 dir1/modified && mkdir dir2 && : >dir2/tracked && + test-tool chmtime =-60 dir2/tracked && : >dir2/modified && + test-tool chmtime =-60 dir2/modified && write_integration_script && git config core.fsmonitor .git/hooks/fsmonitor-test && git update-index --untracked-cache && git update-index --fsmonitor && + git status && GIT_TRACE2_PERF="$TRASH_DIRECTORY/trace-before" \ git status && test-tool dump-untracked-cache >../before diff --git a/t/t7700-repack.sh b/t/t7700-repack.sh index 5922fb5bdd..770d143204 100755 --- a/t/t7700-repack.sh +++ b/t/t7700-repack.sh @@ -381,4 +381,54 @@ test_expect_success TTY '--quiet disables progress' ' test_must_be_empty stderr ' +test_expect_success 'setup for update-server-info' ' + git init update-server-info && + test_commit -C update-server-info message +' + +test_server_info_present () { + test_path_is_file update-server-info/.git/objects/info/packs && + test_path_is_file update-server-info/.git/info/refs +} + +test_server_info_missing () { + test_path_is_missing update-server-info/.git/objects/info/packs && + test_path_is_missing update-server-info/.git/info/refs +} + +test_server_info_cleanup () { + rm -f update-server-info/.git/objects/info/packs update-server-info/.git/info/refs && + test_server_info_missing +} + +test_expect_success 'updates server info by default' ' + test_server_info_cleanup && + git -C update-server-info repack && + test_server_info_present +' + +test_expect_success '-n skips updating server info' ' + test_server_info_cleanup && + git -C update-server-info repack -n && + test_server_info_missing +' + +test_expect_success 'repack.updateServerInfo=true updates server info' ' + test_server_info_cleanup && + git -C update-server-info -c repack.updateServerInfo=true repack && + test_server_info_present +' + +test_expect_success 'repack.updateServerInfo=false skips updating server info' ' + test_server_info_cleanup && + git -C update-server-info -c repack.updateServerInfo=false repack && + test_server_info_missing +' + +test_expect_success '-n overrides repack.updateServerInfo=true' ' + test_server_info_cleanup && + git -C update-server-info -c repack.updateServerInfo=true repack -n && + test_server_info_missing +' + test_done diff --git a/t/t7812-grep-icase-non-ascii.sh b/t/t7812-grep-icase-non-ascii.sh index ca3f24f807..9047d665a1 100755 --- a/t/t7812-grep-icase-non-ascii.sh +++ b/t/t7812-grep-icase-non-ascii.sh @@ -11,9 +11,19 @@ test_expect_success GETTEXT_LOCALE 'setup' ' export LC_ALL ' -test_have_prereq GETTEXT_LOCALE && -test-tool regex "HALLÓ" "Halló" ICASE && -test_set_prereq REGEX_LOCALE +test_expect_success GETTEXT_LOCALE 'setup REGEX_LOCALE prerequisite' ' + # This "test-tool" invocation is identical... 
+ if test-tool regex "HALLÓ" "Halló" ICASE + then + test_set_prereq REGEX_LOCALE + else + + # ... to this one, but this way "test_must_fail" will + # tell a segfault or abort() from the regexec() test + # itself + test_must_fail test-tool regex "HALLÓ" "Halló" ICASE + fi +' test_expect_success REGEX_LOCALE 'grep literal string, no -F' ' git grep -i "TILRAUN: Halló Heimur!" && diff --git a/t/t7817-grep-sparse-checkout.sh b/t/t7817-grep-sparse-checkout.sh index 590b99bbb6..eb59564565 100755 --- a/t/t7817-grep-sparse-checkout.sh +++ b/t/t7817-grep-sparse-checkout.sh @@ -83,10 +83,13 @@ test_expect_success 'setup' ' # The test below covers a special case: the sparsity patterns exclude '/b' and # sparse checkout is enabled, but the path exists in the working tree (e.g. -# manually created after `git sparse-checkout init`). git grep should skip it. +# manually created after `git sparse-checkout init`). Although b is marked +# as SKIP_WORKTREE, git grep should notice it IS present in the worktree and +# report it. test_expect_success 'working tree grep honors sparse checkout' ' cat >expect <<-EOF && a:text + b:new-text EOF test_when_finished "rm -f b" && echo "new-text" >b && @@ -126,12 +129,16 @@ test_expect_success 'grep --cached searches entries with the SKIP_WORKTREE bit' ' # Note that sub2/ is present in the worktree but it is excluded by the sparsity -# patterns, so grep should not recurse into it. +# patterns. We also explicitly mark it as SKIP_WORKTREE in case it got cleared +# by previous git commands. Thus sub2 starts as SKIP_WORKTREE but since it is +# present in the working tree, grep should recurse into it. test_expect_success 'grep --recurse-submodules honors sparse checkout in submodule' ' cat >expect <<-EOF && a:text sub/B/b:text + sub2/a:text EOF + git update-index --skip-worktree sub2 && git grep --recurse-submodules "text" >actual && test_cmp expect actual ' diff --git a/t/t9502-gitweb-standalone-parse-output.sh b/t/t9502-gitweb-standalone-parse-output.sh index 3167473b30..8cb582f0e6 100755 --- a/t/t9502-gitweb-standalone-parse-output.sh +++ b/t/t9502-gitweb-standalone-parse-output.sh @@ -34,7 +34,7 @@ EOF # # This will check that gitweb HTTP header contains proposed filename # as <basename> with '.tar' suffix added, and that generated tarfile -# (gitweb message body) has <prefix> as prefix for al files in tarfile +# (gitweb message body) has <prefix> as prefix for all files in tarfile # # <prefix> default to <basename> check_snapshot () { @@ -207,4 +207,17 @@ test_expect_success 'xss checks' ' xss "" "$TAG+" ' +no_http_equiv_content_type() { + gitweb_run "$@" && + ! 
grep -E "http-equiv=['\"]?content-type" gitweb.body +} + +# See: <https://html.spec.whatwg.org/dev/semantics.html#attr-meta-http-equiv-content-type> +test_expect_success 'no http-equiv="content-type" in XHTML' ' + no_http_equiv_content_type && + no_http_equiv_content_type "p=.git" && + no_http_equiv_content_type "p=.git;a=log" && + no_http_equiv_content_type "p=.git;a=tree" +' + test_done diff --git a/t/test-lib.sh b/t/test-lib.sh index 9af5fb7674..515b1af7ed 100644 --- a/t/test-lib.sh +++ b/t/test-lib.sh @@ -548,11 +548,29 @@ then } else setup_malloc_check () { + local g + local t MALLOC_CHECK_=3 MALLOC_PERTURB_=165 export MALLOC_CHECK_ MALLOC_PERTURB_ + if _GLIBC_VERSION=$(getconf GNU_LIBC_VERSION 2>/dev/null) && + _GLIBC_VERSION=${_GLIBC_VERSION#"glibc "} && + expr 2.34 \<= "$_GLIBC_VERSION" >/dev/null + then + g= + LD_PRELOAD="libc_malloc_debug.so.0" + for t in \ + glibc.malloc.check=1 \ + glibc.malloc.perturb=165 + do + g="${g#:}:$t" + done + GLIBC_TUNABLES=$g + export LD_PRELOAD GLIBC_TUNABLES + fi } teardown_malloc_check () { unset MALLOC_CHECK_ MALLOC_PERTURB_ + unset LD_PRELOAD GLIBC_TUNABLES } fi diff --git a/templates/Makefile b/templates/Makefile index d22a71a399..367ad00c24 100644 --- a/templates/Makefile +++ b/templates/Makefile @@ -1,9 +1,7 @@ -# make and install sample templates - -ifndef V - QUIET = @ -endif +# Import tree-wide shared Makefile behavior and libraries +include ../shared.mak +# make and install sample templates INSTALL ?= install TAR ?= tar RM ?= rm -f @@ -108,16 +108,11 @@ static int prepare_trace_line(const char *file, int line, gettimeofday(&tv, NULL); secs = tv.tv_sec; localtime_r(&secs, &tm); - strbuf_addf(buf, "%02d:%02d:%02d.%06ld ", tm.tm_hour, tm.tm_min, - tm.tm_sec, (long) tv.tv_usec); - -#ifdef HAVE_VARIADIC_MACROS - /* print file:line */ - strbuf_addf(buf, "%s:%d ", file, line); + strbuf_addf(buf, "%02d:%02d:%02d.%06ld %s:%d", tm.tm_hour, tm.tm_min, + tm.tm_sec, (long) tv.tv_usec, file, line); /* align trace output (column 40 catches most files names in git) */ while (buf->len < 40) strbuf_addch(buf, ' '); -#endif return 1; } @@ -229,74 +224,6 @@ static void trace_performance_vprintf_fl(const char *file, int line, strbuf_release(&buf); } -#ifndef HAVE_VARIADIC_MACROS - -void trace_printf(const char *format, ...) -{ - va_list ap; - va_start(ap, format); - trace_vprintf_fl(NULL, 0, &trace_default_key, format, ap); - va_end(ap); -} - -void trace_printf_key(struct trace_key *key, const char *format, ...) -{ - va_list ap; - va_start(ap, format); - trace_vprintf_fl(NULL, 0, key, format, ap); - va_end(ap); -} - -void trace_argv_printf(const char **argv, const char *format, ...) -{ - va_list ap; - va_start(ap, format); - trace_argv_vprintf_fl(NULL, 0, argv, format, ap); - va_end(ap); -} - -void trace_strbuf(struct trace_key *key, const struct strbuf *data) -{ - trace_strbuf_fl(NULL, 0, key, data); -} - -void trace_performance(uint64_t nanos, const char *format, ...) -{ - va_list ap; - va_start(ap, format); - trace_performance_vprintf_fl(NULL, 0, nanos, format, ap); - va_end(ap); -} - -void trace_performance_since(uint64_t start, const char *format, ...) -{ - va_list ap; - va_start(ap, format); - trace_performance_vprintf_fl(NULL, 0, getnanotime() - start, - format, ap); - va_end(ap); -} - -void trace_performance_leave(const char *format, ...) 
-{ - va_list ap; - uint64_t since; - - if (perf_indent) - perf_indent--; - - if (!format) /* Allow callers to leave without tracing anything */ - return; - - since = perf_start_times[perf_indent]; - va_start(ap, format); - trace_performance_vprintf_fl(NULL, 0, getnanotime() - since, - format, ap); - va_end(ap); -} - -#else - void trace_printf_key_fl(const char *file, int line, struct trace_key *key, const char *format, ...) { @@ -342,9 +269,6 @@ void trace_performance_leave_fl(const char *file, int line, va_end(ap); } -#endif /* HAVE_VARIADIC_MACROS */ - - static const char *quote_crnl(const char *path) { static struct strbuf new_path = STRBUF_INIT; @@ -126,71 +126,6 @@ void trace_command_performance(const char **argv); void trace_verbatim(struct trace_key *key, const void *buf, unsigned len); uint64_t trace_performance_enter(void); -#ifndef HAVE_VARIADIC_MACROS - -/** - * Prints a formatted message, similar to printf. - */ -__attribute__((format (printf, 1, 2))) -void trace_printf(const char *format, ...); - -__attribute__((format (printf, 2, 3))) -void trace_printf_key(struct trace_key *key, const char *format, ...); - -/** - * Prints a formatted message, followed by a quoted list of arguments. - */ -__attribute__((format (printf, 2, 3))) -void trace_argv_printf(const char **argv, const char *format, ...); - -/** - * Prints the strbuf, without additional formatting (i.e. doesn't - * choke on `%` or even `\0`). - */ -void trace_strbuf(struct trace_key *key, const struct strbuf *data); - -/** - * Prints elapsed time (in nanoseconds) if GIT_TRACE_PERFORMANCE is enabled. - * - * Example: - * ------------ - * uint64_t t = 0; - * for (;;) { - * // ignore - * t -= getnanotime(); - * // code section to measure - * t += getnanotime(); - * // ignore - * } - * trace_performance(t, "frotz"); - * ------------ - */ -__attribute__((format (printf, 2, 3))) -void trace_performance(uint64_t nanos, const char *format, ...); - -/** - * Prints elapsed time since 'start' if GIT_TRACE_PERFORMANCE is enabled. - * - * Example: - * ------------ - * uint64_t start = getnanotime(); - * // code section to measure - * trace_performance_since(start, "foobar"); - * ------------ - */ -__attribute__((format (printf, 2, 3))) -void trace_performance_since(uint64_t start, const char *format, ...); - -__attribute__((format (printf, 1, 2))) -void trace_performance_leave(const char *format, ...); - -#else - -/* - * Macros to add file:line - see above for C-style declarations of how these - * should be used. - */ - /* * TRACE_CONTEXT may be set to __FUNCTION__ if the compiler supports it. The * default is __FILE__, as it is consistent with assert(), and static function @@ -204,7 +139,10 @@ void trace_performance_leave(const char *format, ...); # define TRACE_CONTEXT __FILE__ #endif -/* +/** + * Macros to add the file:line of the calling code, instead of that of + * the trace function itself. + * * Note: with C99 variadic macros, __VA_ARGS__ must include the last fixed * parameter ('format' in this case). Otherwise, a call without variable * arguments will have a surplus ','. E.g.: @@ -220,6 +158,16 @@ void trace_performance_leave(const char *format, ...); * comma, but this is non-standard. */ +/** + * trace_printf(), accepts "const char *format, ...". + * + * Prints a formatted message, similar to printf. + */ +#define trace_printf(...) trace_printf_key(&trace_default_key, __VA_ARGS__) + +/** + * trace_printf_key(), accepts "struct trace_key *key, const char *format, ...". + */ #define trace_printf_key(key, ...) 
\ do { \ if (trace_pass_fl(key)) \ @@ -227,8 +175,11 @@ void trace_performance_leave(const char *format, ...); __VA_ARGS__); \ } while (0) -#define trace_printf(...) trace_printf_key(&trace_default_key, __VA_ARGS__) - +/** + * trace_argv_printf(), accepts "struct trace_key *key, const char *format, ...)". + * + * Prints a formatted message, followed by a quoted list of arguments. + */ #define trace_argv_printf(argv, ...) \ do { \ if (trace_pass_fl(&trace_default_key)) \ @@ -236,12 +187,36 @@ void trace_performance_leave(const char *format, ...); argv, __VA_ARGS__); \ } while (0) +/** + * trace_strbuf(), accepts "struct trace_key *key, const struct strbuf *data". + * + * Prints the strbuf, without additional formatting (i.e. doesn't + * choke on `%` or even `\0`). + */ #define trace_strbuf(key, data) \ do { \ if (trace_pass_fl(key)) \ trace_strbuf_fl(TRACE_CONTEXT, __LINE__, key, data);\ } while (0) +/** + * trace_performance(), accepts "uint64_t nanos, const char *format, ...". + * + * Prints elapsed time (in nanoseconds) if GIT_TRACE_PERFORMANCE is enabled. + * + * Example: + * ------------ + * uint64_t t = 0; + * for (;;) { + * // ignore + * t -= getnanotime(); + * // code section to measure + * t += getnanotime(); + * // ignore + * } + * trace_performance(t, "frotz"); + * ------------ + */ #define trace_performance(nanos, ...) \ do { \ if (trace_pass_fl(&trace_perf_key)) \ @@ -249,6 +224,18 @@ void trace_performance_leave(const char *format, ...); __VA_ARGS__); \ } while (0) +/** + * trace_performance_since(), accepts "uint64_t start, const char *format, ...". + * + * Prints elapsed time since 'start' if GIT_TRACE_PERFORMANCE is enabled. + * + * Example: + * ------------ + * uint64_t start = getnanotime(); + * // code section to measure + * trace_performance_since(start, "foobar"); + * ------------ + */ #define trace_performance_since(start, ...) \ do { \ if (trace_pass_fl(&trace_perf_key)) \ @@ -257,6 +244,9 @@ void trace_performance_leave(const char *format, ...); __VA_ARGS__); \ } while (0) +/** + * trace_performance_leave(), accepts "const char *format, ...". + */ #define trace_performance_leave(...) \ do { \ if (trace_pass_fl(&trace_perf_key)) \ @@ -285,6 +275,4 @@ static inline int trace_pass_fl(struct trace_key *key) return key->fd || !key->initialized; } -#endif /* HAVE_VARIADIC_MACROS */ - #endif /* TRACE_H */ @@ -641,20 +641,6 @@ void trace2_region_enter_printf_fl(const char *file, int line, va_end(ap); } -#ifndef HAVE_VARIADIC_MACROS -void trace2_region_enter_printf(const char *category, const char *label, - const struct repository *repo, const char *fmt, - ...) -{ - va_list ap; - - va_start(ap, fmt); - trace2_region_enter_printf_va_fl(NULL, 0, category, label, repo, fmt, - ap); - va_end(ap); -} -#endif - void trace2_region_leave_printf_va_fl(const char *file, int line, const char *category, const char *label, const struct repository *repo, @@ -717,20 +703,6 @@ void trace2_region_leave_printf_fl(const char *file, int line, va_end(ap); } -#ifndef HAVE_VARIADIC_MACROS -void trace2_region_leave_printf(const char *category, const char *label, - const struct repository *repo, const char *fmt, - ...) -{ - va_list ap; - - va_start(ap, fmt); - trace2_region_leave_printf_va_fl(NULL, 0, category, label, repo, fmt, - ap); - va_end(ap); -} -#endif - void trace2_data_string_fl(const char *file, int line, const char *category, const struct repository *repo, const char *key, const char *value) @@ -826,17 +798,6 @@ void trace2_printf_fl(const char *file, int line, const char *fmt, ...) 
va_end(ap); } -#ifndef HAVE_VARIADIC_MACROS -void trace2_printf(const char *fmt, ...) -{ - va_list ap; - - va_start(ap, fmt); - trace2_printf_va_fl(NULL, 0, fmt, ap); - va_end(ap); -} -#endif - const char *trace2_session_id(void) { return tr2_sid_get(); @@ -397,18 +397,9 @@ void trace2_region_enter_printf_fl(const char *file, int line, const struct repository *repo, const char *fmt, ...); -#ifdef HAVE_VARIADIC_MACROS #define trace2_region_enter_printf(category, label, repo, ...) \ trace2_region_enter_printf_fl(__FILE__, __LINE__, (category), (label), \ (repo), __VA_ARGS__) -#else -/* clang-format off */ -__attribute__((format (region_enter_printf, 4, 5))) -void trace2_region_enter_printf(const char *category, const char *label, - const struct repository *repo, const char *fmt, - ...); -/* clang-format on */ -#endif /** * Emit a 'region_leave' event for <category>.<label> with optional @@ -442,18 +433,9 @@ void trace2_region_leave_printf_fl(const char *file, int line, const struct repository *repo, const char *fmt, ...); -#ifdef HAVE_VARIADIC_MACROS #define trace2_region_leave_printf(category, label, repo, ...) \ trace2_region_leave_printf_fl(__FILE__, __LINE__, (category), (label), \ (repo), __VA_ARGS__) -#else -/* clang-format off */ -__attribute__((format (region_leave_printf, 4, 5))) -void trace2_region_leave_printf(const char *category, const char *label, - const struct repository *repo, const char *fmt, - ...); -/* clang-format on */ -#endif /** * Emit a key-value pair 'data' event of the form <category>.<key> = <value>. @@ -506,14 +488,7 @@ void trace2_printf_va_fl(const char *file, int line, const char *fmt, void trace2_printf_fl(const char *file, int line, const char *fmt, ...); -#ifdef HAVE_VARIADIC_MACROS #define trace2_printf(...) trace2_printf_fl(__FILE__, __LINE__, __VA_ARGS__) -#else -/* clang-format off */ -__attribute__((format (printf, 1, 2))) -void trace2_printf(const char *fmt, ...); -/* clang-format on */ -#endif /* * Optional platform-specific code to dump information about the diff --git a/transport.c b/transport.c index 253d6671b1..70e9840a90 100644 --- a/transport.c +++ b/transport.c @@ -125,16 +125,9 @@ struct bundle_transport_data { unsigned get_refs_from_bundle_called : 1; }; -static struct ref *get_refs_from_bundle(struct transport *transport, - int for_push, - struct transport_ls_refs_options *transport_options) +static void get_refs_from_bundle_inner(struct transport *transport) { struct bundle_transport_data *data = transport->data; - struct ref *result = NULL; - int i; - - if (for_push) - return NULL; data->get_refs_from_bundle_called = 1; @@ -145,6 +138,20 @@ static struct ref *get_refs_from_bundle(struct transport *transport, die(_("could not read bundle '%s'"), transport->url); transport->hash_algo = data->header.hash_algo; +} + +static struct ref *get_refs_from_bundle(struct transport *transport, + int for_push, + struct transport_ls_refs_options *transport_options) +{ + struct bundle_transport_data *data = transport->data; + struct ref *result = NULL; + int i; + + if (for_push) + return NULL; + + get_refs_from_bundle_inner(transport); for (i = 0; i < data->header.references.nr; i++) { struct string_list_item *e = data->header.references.items + i; @@ -169,7 +176,7 @@ static int fetch_refs_from_bundle(struct transport *transport, strvec_push(&extra_index_pack_args, "-v"); if (!data->get_refs_from_bundle_called) - get_refs_from_bundle(transport, 0, NULL); + get_refs_from_bundle_inner(transport); ret = unbundle(the_repository, &data->header, data->fd, 
&extra_index_pack_args); transport->hash_algo = data->header.hash_algo; diff --git a/tree-walk.c b/tree-walk.c index 3a94959d64..506234b4b8 100644 --- a/tree-walk.c +++ b/tree-walk.c @@ -89,7 +89,7 @@ void *fill_tree_descriptor(struct repository *r, void *buf = NULL; if (oid) { - buf = read_object_with_reference(r, oid, tree_type, &size, NULL); + buf = read_object_with_reference(r, oid, OBJ_TREE, &size, NULL); if (!buf) die("unable to read tree %s", oid_to_hex(oid)); } @@ -605,7 +605,7 @@ int get_tree_entry(struct repository *r, unsigned long size; struct object_id root; - tree = read_object_with_reference(r, tree_oid, tree_type, &size, &root); + tree = read_object_with_reference(r, tree_oid, OBJ_TREE, &size, &root); if (!tree) return -1; @@ -677,7 +677,7 @@ enum get_oid_result get_tree_entry_follow_symlinks(struct repository *r, unsigned long size; tree = read_object_with_reference(r, ¤t_tree_oid, - tree_type, &size, + OBJ_TREE, &size, &root); if (!tree) goto done; diff --git a/unpack-trees.c b/unpack-trees.c index 360844bda3..7f528d35cc 100644 --- a/unpack-trees.c +++ b/unpack-trees.c @@ -595,13 +595,6 @@ static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o) { ce->ce_flags |= CE_UNPACKED; - /* - * If this is a sparse directory, don't advance cache_bottom. - * That will be advanced later using the cache-tree data. - */ - if (S_ISSPARSEDIR(ce->ce_mode)) - return; - if (o->cache_bottom < o->src_index->cache_nr && o->src_index->cache[o->cache_bottom] == ce) { int bottom = o->cache_bottom; @@ -651,24 +644,17 @@ static void mark_ce_used_same_name(struct cache_entry *ce, } } -static struct cache_entry *next_cache_entry(struct unpack_trees_options *o, int *hint) +static struct cache_entry *next_cache_entry(struct unpack_trees_options *o) { const struct index_state *index = o->src_index; int pos = o->cache_bottom; - if (*hint > pos) - pos = *hint; - while (pos < index->cache_nr) { struct cache_entry *ce = index->cache[pos]; - if (!(ce->ce_flags & CE_UNPACKED)) { - *hint = pos + 1; + if (!(ce->ce_flags & CE_UNPACKED)) return ce; - } pos++; } - - *hint = pos; return NULL; } @@ -1360,6 +1346,42 @@ static int is_sparse_directory_entry(struct cache_entry *ce, return sparse_dir_matches_path(ce, info, name); } +static int unpack_sparse_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info) +{ + struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, }; + struct unpack_trees_options *o = info->data; + int ret; + + assert(o->merge); + + /* + * Unlike in 'unpack_callback', where src[0] is derived from the index when + * merging, src[0] is a transient cache entry derived from the first tree + * provided. Create the temporary entry as if it came from a non-sparse index. + */ + if (!is_null_oid(&names[0].oid)) { + src[0] = create_ce_entry(info, &names[0], 0, + &o->result, 1, + dirmask & (1ul << 0)); + src[0]->ce_flags |= (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE); + } + + /* + * 'unpack_single_entry' assumes that src[0] is derived directly from + * the index, rather than from an entry in 'names'. This is *not* true when + * merging a sparse directory, in which case names[0] is the "index" source + * entry. To match the expectations of 'unpack_single_entry', shift past the + * "index" tree (i.e., names[0]) and adjust 'names', 'n', 'mask', and + * 'dirmask' accordingly. + */ + ret = unpack_single_entry(n - 1, mask >> 1, dirmask >> 1, src, names + 1, info); + + if (src[0]) + discard_cache_entry(src[0]); + + return ret >= 0 ? 
mask : -1; +} + /* * Note that traverse_by_cache_tree() duplicates some logic in this function * without actually calling it. If you change the logic here you may need to @@ -1380,13 +1402,12 @@ static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, str /* Are we supposed to look at the index too? */ if (o->merge) { - int hint = -1; while (1) { int cmp; struct cache_entry *ce; if (o->diff_index_cached) - ce = next_cache_entry(o, &hint); + ce = next_cache_entry(o); else ce = find_cache_entry(info, p); @@ -1442,7 +1463,14 @@ static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, str * it does not do any look-ahead, so this is safe. */ if (matches) { - o->cache_bottom += matches; + /* + * Only increment the cache_bottom if the + * directory isn't a sparse directory index + * entry (if it is, it was already incremented) + * in 'mark_ce_used()' + */ + if (!src[0] || !S_ISSPARSEDIR(src[0]->ce_mode)) + o->cache_bottom += matches; return mask; } } @@ -1693,6 +1721,41 @@ static void populate_from_existing_patterns(struct unpack_trees_options *o, o->pl = pl; } +static void update_sparsity_for_prefix(const char *prefix, + struct index_state *istate) +{ + int prefix_len = strlen(prefix); + struct strbuf ce_prefix = STRBUF_INIT; + + if (!istate->sparse_index) + return; + + while (prefix_len > 0 && prefix[prefix_len - 1] == '/') + prefix_len--; + + if (prefix_len <= 0) + BUG("Invalid prefix passed to update_sparsity_for_prefix"); + + strbuf_grow(&ce_prefix, prefix_len + 1); + strbuf_add(&ce_prefix, prefix, prefix_len); + strbuf_addch(&ce_prefix, '/'); + + /* + * If the prefix points to a sparse directory or a path inside a sparse + * directory, the index should be expanded. This is accomplished in one + * of two ways: + * - if the prefix is inside a sparse directory, it will be expanded by + * the 'ensure_full_index(...)' call in 'index_name_pos(...)'. + * - if the prefix matches an existing sparse directory entry, + * 'index_name_pos(...)' will return its index position, triggering + * the 'ensure_full_index(...)' below. + */ + if (!path_in_cone_mode_sparse_checkout(ce_prefix.buf, istate) && + index_name_pos(istate, ce_prefix.buf, ce_prefix.len) >= 0) + ensure_full_index(istate); + + strbuf_release(&ce_prefix); +} static int verify_absent(const struct cache_entry *, enum unpack_trees_error_types, @@ -1706,7 +1769,7 @@ static int verify_absent(const struct cache_entry *, int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o) { struct repository *repo = the_repository; - int i, hint, ret; + int i, ret; static struct cache_entry *dfc; struct pattern_list pl; int free_pattern_list = 0; @@ -1739,6 +1802,9 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options setup_standard_excludes(o->dir); } + if (o->prefix) + update_sparsity_for_prefix(o->prefix, o->src_index); + if (!core_apply_sparse_checkout || !o->update) o->skip_sparse_checkout = 1; if (!o->skip_sparse_checkout && !o->pl) { @@ -1795,15 +1861,13 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options info.pathspec = o->pathspec; if (o->prefix) { - hint = -1; - /* * Unpack existing index entries that sort before the * prefix the tree is spliced into. Note that o->merge * is always true in this case. 
*/ while (1) { - struct cache_entry *ce = next_cache_entry(o, &hint); + struct cache_entry *ce = next_cache_entry(o); if (!ce) break; if (ce_in_traverse_path(ce, &info)) @@ -1824,9 +1888,8 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options /* Any left-over entries in the index? */ if (o->merge) { - hint = -1; while (1) { - struct cache_entry *ce = next_cache_entry(o, &hint); + struct cache_entry *ce = next_cache_entry(o); if (!ce) break; if (unpack_index_entry(ce, o) < 0) @@ -2065,7 +2128,9 @@ static int verify_uptodate_1(const struct cache_entry *ce, int verify_uptodate(const struct cache_entry *ce, struct unpack_trees_options *o) { - if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE)) + if (!o->skip_sparse_checkout && + (ce->ce_flags & CE_SKIP_WORKTREE) && + (ce->ce_flags & CE_NEW_SKIP_WORKTREE)) return 0; return verify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE); } @@ -2434,6 +2499,37 @@ static int merged_entry(const struct cache_entry *ce, return 1; } +static int merged_sparse_dir(const struct cache_entry * const *src, int n, + struct unpack_trees_options *o) +{ + struct tree_desc t[MAX_UNPACK_TREES + 1]; + void * tree_bufs[MAX_UNPACK_TREES + 1]; + struct traverse_info info; + int i, ret; + + /* + * Create the tree traversal information for traversing into *only* the + * sparse directory. + */ + setup_traverse_info(&info, src[0]->name); + info.fn = unpack_sparse_callback; + info.data = o; + info.show_all_errors = o->show_all_errors; + info.pathspec = o->pathspec; + + /* Get the tree descriptors of the sparse directory in each of the merging trees */ + for (i = 0; i < n; i++) + tree_bufs[i] = fill_tree_descriptor(o->src_index->repo, &t[i], + src[i] && !is_null_oid(&src[i]->oid) ? &src[i]->oid : NULL); + + ret = traverse_trees(o->src_index, n, t, &info); + + for (i = 0; i < n; i++) + free(tree_bufs[i]); + + return ret; +} + static int deleted_entry(const struct cache_entry *ce, const struct cache_entry *old, struct unpack_trees_options *o) @@ -2538,16 +2634,24 @@ int threeway_merge(const struct cache_entry * const *stages, */ /* #14, #14ALT, #2ALT */ if (remote && !df_conflict_head && head_match && !remote_match) { - if (index && !same(index, remote) && !same(index, head)) - return reject_merge(index, o); + if (index && !same(index, remote) && !same(index, head)) { + if (S_ISSPARSEDIR(index->ce_mode)) + return merged_sparse_dir(stages, 4, o); + else + return reject_merge(index, o); + } return merged_entry(remote, index, o); } /* * If we have an entry in the index cache, then we want to * make sure that it matches head. */ - if (index && !same(index, head)) - return reject_merge(index, o); + if (index && !same(index, head)) { + if (S_ISSPARSEDIR(index->ce_mode)) + return merged_sparse_dir(stages, 4, o); + else + return reject_merge(index, o); + } if (head) { /* #5ALT, #15 */ @@ -2609,11 +2713,21 @@ int threeway_merge(const struct cache_entry * const *stages, } - /* Below are "no merge" cases, which require that the index be - * up-to-date to avoid the files getting overwritten with - * conflict resolution files. - */ + /* Handle "no merge" cases (see t/t1000-read-tree-m-3way.sh) */ if (index) { + /* + * If we've reached the "no merge" cases and we're merging + * a sparse directory, we may have an "edit/edit" conflict that + * can be resolved by individually merging directory contents. 
+ */ + if (S_ISSPARSEDIR(index->ce_mode)) + return merged_sparse_dir(stages, 4, o); + + /* + * If we're not merging a sparse directory, ensure the index is + * up-to-date to avoid files getting overwritten with conflict + * resolution files + */ if (verify_uptodate(index, o)) return -1; } @@ -2704,6 +2818,14 @@ int twoway_merge(const struct cache_entry * const *src, * reject the merge instead. */ return merged_entry(newtree, current, o); + } else if (S_ISSPARSEDIR(current->ce_mode)) { + /* + * The sparse directories differ, but we don't know whether that's + * because of two different files in the directory being modified + * (can be trivially merged) or if there is a real file conflict. + * Merge the sparse directory by OID to compare file-by-file. + */ + return merged_sparse_dir(src, 3, o); } else return reject_merge(current, o); } diff --git a/upload-pack.c b/upload-pack.c index 8acc98741b..3a851b3606 100644 --- a/upload-pack.c +++ b/upload-pack.c @@ -1400,13 +1400,19 @@ static int parse_want(struct packet_writer *writer, const char *line, const char *arg; if (skip_prefix(line, "want ", &arg)) { struct object_id oid; + struct commit *commit; struct object *o; if (get_oid_hex(arg, &oid)) die("git upload-pack: protocol error, " "expected to get oid, not '%s'", line); - o = parse_object(the_repository, &oid); + commit = lookup_commit_in_graph(the_repository, &oid); + if (commit) + o = &commit->object; + else + o = parse_object(the_repository, &oid); + if (!o) { packet_writer_error(writer, "upload-pack: not our ref %s", @@ -1434,7 +1440,7 @@ static int parse_want_ref(struct packet_writer *writer, const char *line, if (skip_prefix(line, "want-ref ", &refname_nons)) { struct object_id oid; struct string_list_item *item; - struct object *o; + struct object *o = NULL; struct strbuf refname = STRBUF_INIT; strbuf_addf(&refname, "%s%s", get_git_namespace(), refname_nons); @@ -1448,7 +1454,15 @@ static int parse_want_ref(struct packet_writer *writer, const char *line, item = string_list_append(wanted_refs, refname_nons); item->util = oiddup(&oid); - o = parse_object_or_die(&oid, refname_nons); + if (!starts_with(refname_nons, "refs/tags/")) { + struct commit *commit = lookup_commit_in_graph(the_repository, &oid); + if (commit) + o = &commit->object; + } + + if (!o) + o = parse_object_or_die(&oid, refname_nons); + if (!(o->flags & WANTED)) { o->flags |= WANTED; add_object_array(o, NULL, want_obj); diff --git a/urlmatch.c b/urlmatch.c index 03ad3f30a9..b615adc923 100644 --- a/urlmatch.c +++ b/urlmatch.c @@ -611,3 +611,8 @@ int urlmatch_config_entry(const char *var, const char *value, void *cb) strbuf_release(&synthkey); return retval; } + +void urlmatch_config_release(struct urlmatch_config *config) +{ + string_list_clear(&config->vars, 1); +} diff --git a/urlmatch.h b/urlmatch.h index 34a3ba6d19..9f40b00bfb 100644 --- a/urlmatch.h +++ b/urlmatch.h @@ -71,5 +71,6 @@ struct urlmatch_config { } int urlmatch_config_entry(const char *var, const char *value, void *cb); +void urlmatch_config_release(struct urlmatch_config *config); #endif /* URL_MATCH_H */ @@ -299,10 +299,7 @@ static NORETURN void BUG_vfl(const char *file, int line, const char *fmt, va_lis va_copy(params_copy, params); /* truncation via snprintf is OK here */ - if (file) - snprintf(prefix, sizeof(prefix), "BUG: %s:%d: ", file, line); - else - snprintf(prefix, sizeof(prefix), "BUG: "); + snprintf(prefix, sizeof(prefix), "BUG: %s:%d: ", file, line); vreportf(prefix, fmt, params); @@ -317,7 +314,6 @@ static NORETURN void BUG_vfl(const char 
*file, int line, const char *fmt, va_lis abort(); } -#ifdef HAVE_VARIADIC_MACROS NORETURN void BUG_fl(const char *file, int line, const char *fmt, ...) { va_list ap; @@ -325,15 +321,6 @@ NORETURN void BUG_fl(const char *file, int line, const char *fmt, ...) BUG_vfl(file, line, fmt, ap); va_end(ap); } -#else -NORETURN void BUG(const char *fmt, ...) -{ - va_list ap; - va_start(ap, fmt); - BUG_vfl(NULL, 0, fmt, ap); - va_end(ap); -} -#endif #ifdef SUPPRESS_ANNOTATED_LEAKS void unleak_memory(const void *ptr, size_t len) diff --git a/userdiff.c b/userdiff.c index 2d9eb99bf2..151d9a5278 100644 --- a/userdiff.c +++ b/userdiff.c @@ -180,6 +180,18 @@ PATTERNS("java", "|[-+0-9.e]+[fFlL]?|0[xXbB]?[0-9a-fA-F]+[lL]?" "|[-+*/<>%&^|=!]=" "|--|\\+\\+|<<=?|>>>?=?|&&|\\|\\|"), +PATTERNS("kotlin", + "^[ \t]*(([a-z]+[ \t]+)*(fun|class|interface)[ \t]+.*)$", + /* -- */ + "[a-zA-Z_][a-zA-Z0-9_]*" + /* hexadecimal and binary numbers */ + "|0[xXbB][0-9a-fA-F_]+[lLuU]*" + /* integers and floats */ + "|[0-9][0-9_]*([.][0-9_]*)?([Ee][-+]?[0-9]+)?[fFlLuU]*" + /* floating point numbers beginning with decimal point */ + "|[.][0-9][0-9_]*([Ee][-+]?[0-9]+)?[fFlLuU]?" + /* unary and binary operators */ + "|[-+*/<>%&^|=!]==?|--|\\+\\+|<<=|>>=|&&|\\|\\||->|\\.\\*|!!|[?:.][.:]"), PATTERNS("markdown", "^ {0,3}#{1,6}[ \t].*", /* -- */ @@ -4,6 +4,13 @@ #include "cache.h" #include "config.h" +#ifdef HAVE_RTLGENRANDOM +/* This is required to get access to RtlGenRandom. */ +#define SystemFunction036 NTAPI SystemFunction036 +#include <NTSecAPI.h> +#undef SystemFunction036 +#endif + static int memory_limit_check(size_t size, int gentle) { static size_t limit = 0; @@ -539,6 +546,70 @@ int xmkstemp_mode(char *filename_template, int mode) return fd; } +/* + * Some platforms return EINTR from fsync. Since fsync is invoked in some + * cases by a wrapper that dies on failure, do not expose EINTR to callers. + */ +static int fsync_loop(int fd) +{ + int err; + + do { + err = fsync(fd); + } while (err < 0 && errno == EINTR); + return err; +} + +int git_fsync(int fd, enum fsync_action action) +{ + switch (action) { + case FSYNC_WRITEOUT_ONLY: + +#ifdef __APPLE__ + /* + * On macOS, fsync just causes filesystem cache writeback but + * does not flush hardware caches. + */ + return fsync_loop(fd); +#endif + +#ifdef HAVE_SYNC_FILE_RANGE + /* + * On linux 2.6.17 and above, sync_file_range is the way to + * issue a writeback without a hardware flush. An offset of + * 0 and size of 0 indicates writeout of the entire file and the + * wait flags ensure that all dirty data is written to the disk + * (potentially in a disk-side cache) before we continue. + */ + + return sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WAIT_BEFORE | + SYNC_FILE_RANGE_WRITE | + SYNC_FILE_RANGE_WAIT_AFTER); +#endif + +#ifdef fsync_no_flush + return fsync_no_flush(fd); +#endif + + errno = ENOSYS; + return -1; + + case FSYNC_HARDWARE_FLUSH: + /* + * On macOS, a special fcntl is required to really flush the + * caches within the storage controller. As of this writing, + * this is a very expensive operation on Apple SSDs. + */ +#ifdef __APPLE__ + return fcntl(fd, F_FULLFSYNC); +#else + return fsync_loop(fd); +#endif + default: + BUG("unexpected git_fsync(%d) call", action); + } +} + static int warn_if_unremovable(const char *op, const char *file, int rc) { int err; diff --git a/write-or-die.c b/write-or-die.c index a3d5784cec..c4fd91b5b4 100644 --- a/write-or-die.c +++ b/write-or-die.c @@ -56,16 +56,37 @@ void fprintf_or_die(FILE *f, const char *fmt, ...) 
} } -void fsync_or_die(int fd, const char *msg) +static int maybe_fsync(int fd) { if (use_fsync < 0) use_fsync = git_env_bool("GIT_TEST_FSYNC", 1); if (!use_fsync) - return; - while (fsync(fd) < 0) { - if (errno != EINTR) - die_errno("fsync error on '%s'", msg); - } + return 0; + + if (fsync_method == FSYNC_METHOD_WRITEOUT_ONLY && + git_fsync(fd, FSYNC_WRITEOUT_ONLY) >= 0) + return 0; + + return git_fsync(fd, FSYNC_HARDWARE_FLUSH); +} + +void fsync_or_die(int fd, const char *msg) +{ + if (maybe_fsync(fd) < 0) + die_errno("fsync error on '%s'", msg); +} + +int fsync_component(enum fsync_component component, int fd) +{ + if (fsync_components & component) + return maybe_fsync(fd); + return 0; +} + +void fsync_component_or_die(enum fsync_component component, int fd, const char *msg) +{ + if (fsync_components & component) + fsync_or_die(fd, msg); } void write_or_die(int fd, const void *buf, size_t count) diff --git a/wt-status.c b/wt-status.c index 335e723a71..d33f9272b7 100644 --- a/wt-status.c +++ b/wt-status.c @@ -651,6 +651,15 @@ static void wt_status_collect_changes_index(struct wt_status *s) rev.diffopt.detect_rename = s->detect_rename >= 0 ? s->detect_rename : rev.diffopt.detect_rename; rev.diffopt.rename_limit = s->rename_limit >= 0 ? s->rename_limit : rev.diffopt.rename_limit; rev.diffopt.rename_score = s->rename_score >= 0 ? s->rename_score : rev.diffopt.rename_score; + + /* + * The `recursive` option must be enabled to allow the diff to recurse + * into subdirectories of sparse directory index entries. If it is not + * enabled, a subdirectory containing file(s) with changes is reported + * as "modified", rather than the modified files themselves. + */ + rev.diffopt.flags.recursive = 1; + copy_pathspec(&rev.prune_data, &s->pathspec); run_diff_index(&rev, 1); object_array_clear(&rev.pending); @@ -1374,10 +1383,10 @@ static void show_rebase_information(struct wt_status *s, status_printf_ln(s, color, _("No commands done.")); else { status_printf_ln(s, color, - Q_("Last command done (%d command done):", - "Last commands done (%d commands done):", + Q_("Last command done (%"PRIuMAX" command done):", + "Last commands done (%"PRIuMAX" commands done):", have_done.nr), - have_done.nr); + (uintmax_t)have_done.nr); for (i = (have_done.nr > nr_lines_to_show) ? 
have_done.nr - nr_lines_to_show : 0; i < have_done.nr; @@ -1393,10 +1402,10 @@ static void show_rebase_information(struct wt_status *s, _("No commands remaining.")); else { status_printf_ln(s, color, - Q_("Next command to do (%d remaining command):", - "Next commands to do (%d remaining commands):", + Q_("Next command to do (%"PRIuMAX" remaining command):", + "Next commands to do (%"PRIuMAX" remaining commands):", yet_to_do.nr), - yet_to_do.nr); + (uintmax_t)yet_to_do.nr); for (i = 0; i < nr_lines_to_show && i < yet_to_do.nr; i++) status_printf_ln(s, color, " %s", yet_to_do.items[i].string); if (s->hints) diff --git a/xdiff/xdiffi.c b/xdiff/xdiffi.c index 69689fab24..758410c11a 100644 --- a/xdiff/xdiffi.c +++ b/xdiff/xdiffi.c @@ -315,16 +315,19 @@ int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp, long *kvd, *kvdf, *kvdb; xdalgoenv_t xenv; diffdata_t dd1, dd2; + int res; - if (XDF_DIFF_ALG(xpp->flags) == XDF_PATIENCE_DIFF) - return xdl_do_patience_diff(mf1, mf2, xpp, xe); - - if (XDF_DIFF_ALG(xpp->flags) == XDF_HISTOGRAM_DIFF) - return xdl_do_histogram_diff(mf1, mf2, xpp, xe); + if (xdl_prepare_env(mf1, mf2, xpp, xe) < 0) + return -1; - if (xdl_prepare_env(mf1, mf2, xpp, xe) < 0) { + if (XDF_DIFF_ALG(xpp->flags) == XDF_PATIENCE_DIFF) { + res = xdl_do_patience_diff(mf1, mf2, xpp, xe); + goto out; + } - return -1; + if (XDF_DIFF_ALG(xpp->flags) == XDF_HISTOGRAM_DIFF) { + res = xdl_do_histogram_diff(mf1, mf2, xpp, xe); + goto out; } /* @@ -359,17 +362,15 @@ int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp, dd2.rchg = xe->xdf2.rchg; dd2.rindex = xe->xdf2.rindex; - if (xdl_recs_cmp(&dd1, 0, dd1.nrec, &dd2, 0, dd2.nrec, - kvdf, kvdb, (xpp->flags & XDF_NEED_MINIMAL) != 0, &xenv) < 0) { - - xdl_free(kvd); - xdl_free_env(xe); - return -1; - } - + res = xdl_recs_cmp(&dd1, 0, dd1.nrec, &dd2, 0, dd2.nrec, + kvdf, kvdb, (xpp->flags & XDF_NEED_MINIMAL) != 0, + &xenv); xdl_free(kvd); + out: + if (res < 0) + xdl_free_env(xe); - return 0; + return res; } diff --git a/xdiff/xhistogram.c b/xdiff/xhistogram.c index 80794748b0..01decffc33 100644 --- a/xdiff/xhistogram.c +++ b/xdiff/xhistogram.c @@ -372,9 +372,6 @@ out: int xdl_do_histogram_diff(mmfile_t *file1, mmfile_t *file2, xpparam_t const *xpp, xdfenv_t *env) { - if (xdl_prepare_env(file1, file2, xpp, env) < 0) - return -1; - return histogram_diff(xpp, env, env->xdf1.dstart + 1, env->xdf1.dend - env->xdf1.dstart + 1, env->xdf2.dstart + 1, env->xdf2.dend - env->xdf2.dstart + 1); diff --git a/xdiff/xmerge.c b/xdiff/xmerge.c index fff0b594f9..af40c88a5b 100644 --- a/xdiff/xmerge.c +++ b/xdiff/xmerge.c @@ -684,42 +684,42 @@ static int xdl_do_merge(xdfenv_t *xe1, xdchange_t *xscr1, int xdl_merge(mmfile_t *orig, mmfile_t *mf1, mmfile_t *mf2, xmparam_t const *xmp, mmbuffer_t *result) { - xdchange_t *xscr1, *xscr2; + xdchange_t *xscr1 = NULL, *xscr2 = NULL; xdfenv_t xe1, xe2; - int status; + int status = -1; xpparam_t const *xpp = &xmp->xpp; result->ptr = NULL; result->size = 0; - if (xdl_do_diff(orig, mf1, xpp, &xe1) < 0) { + if (xdl_do_diff(orig, mf1, xpp, &xe1) < 0) return -1; - } - if (xdl_do_diff(orig, mf2, xpp, &xe2) < 0) { - xdl_free_env(&xe1); - return -1; - } + + if (xdl_do_diff(orig, mf2, xpp, &xe2) < 0) + goto free_xe1; /* avoid double free of xe2 */ + if (xdl_change_compact(&xe1.xdf1, &xe1.xdf2, xpp->flags) < 0 || xdl_change_compact(&xe1.xdf2, &xe1.xdf1, xpp->flags) < 0 || - xdl_build_script(&xe1, &xscr1) < 0) { - xdl_free_env(&xe1); - return -1; - } + xdl_build_script(&xe1, &xscr1) < 0) + goto out; + if 
(xdl_change_compact(&xe2.xdf1, &xe2.xdf2, xpp->flags) < 0 || xdl_change_compact(&xe2.xdf2, &xe2.xdf1, xpp->flags) < 0 || - xdl_build_script(&xe2, &xscr2) < 0) { - xdl_free_script(xscr1); - xdl_free_env(&xe1); - xdl_free_env(&xe2); - return -1; - } - status = 0; + xdl_build_script(&xe2, &xscr2) < 0) + goto out; + if (!xscr1) { result->ptr = xdl_malloc(mf2->size); + if (!result->ptr) + goto out; + status = 0; memcpy(result->ptr, mf2->ptr, mf2->size); result->size = mf2->size; } else if (!xscr2) { result->ptr = xdl_malloc(mf1->size); + if (!result->ptr) + goto out; + status = 0; memcpy(result->ptr, mf1->ptr, mf1->size); result->size = mf1->size; } else { @@ -727,11 +727,13 @@ int xdl_merge(mmfile_t *orig, mmfile_t *mf1, mmfile_t *mf2, &xe2, xscr2, xmp, result); } + out: xdl_free_script(xscr1); xdl_free_script(xscr2); - xdl_free_env(&xe1); xdl_free_env(&xe2); + free_xe1: + xdl_free_env(&xe1); return status; } diff --git a/xdiff/xpatience.c b/xdiff/xpatience.c index c5d48e80ae..1a21c6a74b 100644 --- a/xdiff/xpatience.c +++ b/xdiff/xpatience.c @@ -198,7 +198,7 @@ static int binary_search(struct entry **sequence, int longest, * item per sequence length: the sequence with the smallest last * element (in terms of line2). */ -static struct entry *find_longest_common_sequence(struct hashmap *map) +static int find_longest_common_sequence(struct hashmap *map, struct entry **res) { struct entry **sequence = xdl_malloc(map->nr * sizeof(struct entry *)); int longest = 0, i; @@ -211,6 +211,9 @@ static struct entry *find_longest_common_sequence(struct hashmap *map) */ int anchor_i = -1; + if (!sequence) + return -1; + for (entry = map->first; entry; entry = entry->next) { if (!entry->line2 || entry->line2 == NON_UNIQUE) continue; @@ -230,8 +233,9 @@ static struct entry *find_longest_common_sequence(struct hashmap *map) /* No common unique lines were found */ if (!longest) { + *res = NULL; xdl_free(sequence); - return NULL; + return 0; } /* Iterate starting at the last element, adjusting the "next" members */ @@ -241,8 +245,9 @@ static struct entry *find_longest_common_sequence(struct hashmap *map) entry->previous->next = entry; entry = entry->previous; } + *res = entry; xdl_free(sequence); - return entry; + return 0; } static int match(struct hashmap *map, int line1, int line2) @@ -358,14 +363,16 @@ static int patience_diff(mmfile_t *file1, mmfile_t *file2, return 0; } - first = find_longest_common_sequence(&map); + result = find_longest_common_sequence(&map, &first); + if (result) + goto out; if (first) result = walk_common_sequence(&map, first, line1, count1, line2, count2); else result = fall_back_to_classic_diff(&map, line1, count1, line2, count2); - + out: xdl_free(map.entries); return result; } @@ -373,10 +380,6 @@ static int patience_diff(mmfile_t *file1, mmfile_t *file2, int xdl_do_patience_diff(mmfile_t *file1, mmfile_t *file2, xpparam_t const *xpp, xdfenv_t *env) { - if (xdl_prepare_env(file1, file2, xpp, env) < 0) - return -1; - - /* environment is cleaned up in xdl_diff() */ return patience_diff(file1, file2, xpp, env, 1, env->xdf1.nrec, 1, env->xdf2.nrec); } |
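
The xdl_merge() and xdl_do_diff() hunks above replace scattered early returns with a single cleanup path, so every allocation is released exactly once whether the diff succeeds or fails. Below is a minimal sketch of that same single-exit idiom outside of xdiff; it is not part of the patch, and all names in it (build_result, buf_a, buf_b) are hypothetical, chosen only to illustrate the pattern.

/*
 * Sketch of the single-exit cleanup idiom: initialize resources to a
 * safe "empty" value, keep a pessimistic status, jump to one label on
 * any failure, and free unconditionally there.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int build_result(const char *a, const char *b, char **out)
{
	char *buf_a = NULL, *buf_b = NULL;  /* NULL so free() is always safe */
	int status = -1;                    /* pessimistic default, as in xdl_merge() */

	buf_a = strdup(a);
	if (!buf_a)
		goto out;
	buf_b = strdup(b);
	if (!buf_b)
		goto out;

	*out = malloc(strlen(buf_a) + strlen(buf_b) + 1);
	if (!*out)
		goto out;
	strcpy(*out, buf_a);
	strcat(*out, buf_b);
	status = 0;                         /* flip to success only at the very end */

out:
	free(buf_a);                        /* one cleanup path for every exit */
	free(buf_b);
	return status;
}

int main(void)
{
	char *merged;
	if (!build_result("hello, ", "world", &merged)) {
		puts(merged);
		free(merged);
	}
	return 0;
}

The points the patch relies on are the same ones this sketch shows: resources start out empty so the cleanup label can release them unconditionally, and the status variable stays negative until the last step has actually succeeded.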