103 files changed, 6268 insertions, 692 deletions
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 5f2f884b92..73856bafc9 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -186,6 +186,11 @@ jobs: ## Unzip and remove the artifact unzip artifacts.zip rm artifacts.zip + - name: initialize vcpkg + uses: actions/checkout@v2 + with: + repository: 'microsoft/vcpkg' + path: 'compat/vcbuild/vcpkg' - name: download vcpkg artifacts shell: powershell run: | diff --git a/Documentation/RelNotes/2.31.1.txt b/Documentation/RelNotes/2.31.1.txt new file mode 100644 index 0000000000..f9b06b8e1b --- /dev/null +++ b/Documentation/RelNotes/2.31.1.txt @@ -0,0 +1,27 @@ +Git 2.31.1 Release Notes +======================== + +Fixes since v2.31 +----------------- + + * The fsmonitor interface read from its input without making sure + there is something to read from. This bug is new in 2.31 + timeframe. + + * The data structure used by fsmonitor interface was not properly + duplicated during an in-core merge, leading to use-after-free etc. + + * "git bisect" reimplemented more in C during 2.30 timeframe did not + take an annotated tag as a good/bad endpoint well. This regression + has been corrected. + + * Fix macros that can silently inject unintended null-statements. + + * CALLOC_ARRAY() macro replaces many uses of xcalloc(). + + * Update insn in Makefile comments to run fuzz-all target. + + * Fix a corner case bug in "git mv" on case insensitive systems, + which was introduced in 2.29 timeframe. + +Also contains various documentation updates and code clean-ups. diff --git a/Documentation/RelNotes/2.32.0.txt b/Documentation/RelNotes/2.32.0.txt index 6de5c3dac8..d69e69ffd7 100644 --- a/Documentation/RelNotes/2.32.0.txt +++ b/Documentation/RelNotes/2.32.0.txt @@ -8,7 +8,7 @@ Backward compatibility notes symbolic links are ignored. -Updates since v2.32 +Updates since v2.31 ------------------- UI, Workflows & Features @@ -25,11 +25,54 @@ UI, Workflows & Features * "git log --format='...'" learned "%(describe)" placeholder. + * "git repack" so far has been only capable of repacking everything + under the sun into a single pack (or split by size). A cleverer + strategy to reduce the cost of repacking a repository has been + introduced. + + * The http codepath learned to let the credential layer to cache the + password used to unlock a certificate that has successfully been + used. + + * "git commit --fixup=<commit>", which was to tweak the changes made + to the contents while keeping the original log message intact, + learned "--fixup=(amend|reword):<commit>", that can be used to + tweak both the message and the contents, and only the message, + respectively. + + * When accessing a server with a URL like https://user:pass@site/, we + did not to fall back to the basic authentication with the + credential material embedded in the URL after the "Negotiate" + authentication failed. Now we do. + + * "git send-email" learned to honor the core.hooksPath configuration. + + * "git format-patch -v<n>" learned to allow a reroll count that is + not an integer. + Performance, Internal Implementation, Development Support etc. * Rename detection rework continues. + * GIT_TEST_FAIL_PREREQS is a mechanism to skip test pieces with + prerequisites to catch broken tests that depend on the side effects + of optional pieces, but did not work at all when negative + prerequisites were involved. + (merge 27d578d904 jk/fail-prereq-testfix later to maint). 
+ + * "git diff-index" codepath has been taught to trust fsmonitor status + to reduce number of lstat() calls. + (merge 7e5aa13d2c nk/diff-index-fsmonitor later to maint). + + * Reorganize Makefile to allow building git.o and other essential + objects without extra stuff needed only for testing. + + * Preparatory API changes for parallel checkout. + + * A simple IPC interface gets introduced to build services like + fsmonitor on top. + Fixes since v2.31 ----------------- @@ -37,29 +80,22 @@ Fixes since v2.31 * The fsmonitor interface read from its input without making sure there is something to read from. This bug is new in 2.31 timeframe. - (merge 097ea2c848 jh/fsmonitor-prework later to maint). * The data structure used by fsmonitor interface was not properly duplicated during an in-core merge, leading to use-after-free etc. - (merge 4abc57848d js/fsmonitor-unpack-fix later to maint). * "git bisect" reimplemented more in C during 2.30 timeframe did not take an annotated tag as a good/bad endpoint well. This regression has been corrected. - (merge 7730f85594 jk/bisect-peel-tag-fix later to maint). * Fix macros that can silently inject unintended null-statements. - (merge 116affac3f rs/avoid-null-statement-after-macro-call later to maint). * CALLOC_ARRAY() macro replaces many uses of xcalloc(). - (merge 1c57cc70ec rs/calloc-array later to maint). * Update insn in Makefile comments to run fuzz-all target. - (merge 68b5c3aa48 ah/make-fuzz-all-doc-update later to maint). * Fix a corner case bug in "git mv" on case insensitive systems, which was introduced in 2.29 timeframe. - (merge 93c3d297b5 tb/git-mv-icase-fix later to maint). * We had a code to diagnose and die cleanly when a required clean/smudge filter is missing, but an assert before that @@ -86,14 +122,20 @@ Fixes since v2.31 which has been corrected. (merge 75555676ad bc/clone-bare-with-conflicting-config later to maint). + * When "git checkout" removes a path that does not exist in the + commit it is checking out, it wasn't careful enough not to follow + symbolic links, which has been corrected. + (merge fab78a0c3d mt/checkout-remove-nofollow later to maint). + * Other code cleanup, docfix, build fix, etc. - (merge 486f4bd183 jc/calloc-fix later to maint). - (merge 5f70859c15 jt/clone-unborn-head later to maint). - (merge cfd409ed09 km/config-doc-typofix later to maint). - (merge 8588aa8657 jk/slimmed-down later to maint). - (merge 241b5d3ebe rs/xcalloc-takes-nelem-first later to maint). (merge f451960708 dl/cat-file-doc-cleanup later to maint). (merge 12604a8d0c sv/t9801-test-path-is-file-cleanup later to maint). (merge ea7e63921c jr/doc-ignore-typofix later to maint). (merge 23c781f173 ps/update-ref-trans-hook-doc later to maint). (merge 42efa1231a jk/filter-branch-sha256 later to maint). + (merge 4c8e3dca6e tb/push-simple-uses-branch-merge-config later to maint). + (merge 6534d436a2 bs/asciidoctor-installation-hints later to maint). + (merge 47957485b3 ab/read-tree later to maint). + (merge 2be927f3d1 ab/diff-no-index-tests later to maint). + (merge 76593c09bb ab/detox-gettext-tests later to maint). + (merge 28e29ee38b jc/doc-format-patch-clarify later to maint). diff --git a/Documentation/config/rebase.txt b/Documentation/config/rebase.txt index 214f31b451..8c979cb20f 100644 --- a/Documentation/config/rebase.txt +++ b/Documentation/config/rebase.txt @@ -1,10 +1,3 @@ -rebase.useBuiltin:: - Unused configuration variable. 
Used in Git versions 2.20 and - 2.21 as an escape hatch to enable the legacy shellscript - implementation of rebase. Now the built-in rewrite of it in C - is always used. Setting this will emit a warning, to alert any - remaining users that setting this now does nothing. - rebase.backend:: Default backend to use for rebasing. Possible choices are 'apply' or 'merge'. In the future, if the merge backend gains diff --git a/Documentation/git-commit.txt b/Documentation/git-commit.txt index 3fe7ef33cb..340c5fbb48 100644 --- a/Documentation/git-commit.txt +++ b/Documentation/git-commit.txt @@ -9,7 +9,7 @@ SYNOPSIS -------- [verse] 'git commit' [-a | --interactive | --patch] [-s] [-v] [-u<mode>] [--amend] - [--dry-run] [(-c | -C | --fixup | --squash) <commit>] + [--dry-run] [(-c | -C | --squash) <commit> | --fixup [(amend|reword):]<commit>)] [-F <file> | -m <msg>] [--reset-author] [--allow-empty] [--allow-empty-message] [--no-verify] [-e] [--author=<author>] [--date=<date>] [--cleanup=<mode>] [--[no-]status] @@ -87,11 +87,44 @@ OPTIONS Like '-C', but with `-c` the editor is invoked, so that the user can further edit the commit message. ---fixup=<commit>:: - Construct a commit message for use with `rebase --autosquash`. - The commit message will be the subject line from the specified - commit with a prefix of "fixup! ". See linkgit:git-rebase[1] - for details. +--fixup=[(amend|reword):]<commit>:: + Create a new commit which "fixes up" `<commit>` when applied with + `git rebase --autosquash`. Plain `--fixup=<commit>` creates a + "fixup!" commit which changes the content of `<commit>` but leaves + its log message untouched. `--fixup=amend:<commit>` is similar but + creates an "amend!" commit which also replaces the log message of + `<commit>` with the log message of the "amend!" commit. + `--fixup=reword:<commit>` creates an "amend!" commit which + replaces the log message of `<commit>` with its own log message + but makes no changes to the content of `<commit>`. ++ +The commit created by plain `--fixup=<commit>` has a subject +composed of "fixup!" followed by the subject line from <commit>, +and is recognized specially by `git rebase --autosquash`. The `-m` +option may be used to supplement the log message of the created +commit, but the additional commentary will be thrown away once the +"fixup!" commit is squashed into `<commit>` by +`git rebase --autosquash`. ++ +The commit created by `--fixup=amend:<commit>` is similar but its +subject is instead prefixed with "amend!". The log message of +<commit> is copied into the log message of the "amend!" commit and +opened in an editor so it can be refined. When `git rebase +--autosquash` squashes the "amend!" commit into `<commit>`, the +log message of `<commit>` is replaced by the refined log message +from the "amend!" commit. It is an error for the "amend!" commit's +log message to be empty unless `--allow-empty-message` is +specified. ++ +`--fixup=reword:<commit>` is shorthand for `--fixup=amend:<commit> +--only`. It creates an "amend!" commit with only a log message +(ignoring any changes staged in the index). When squashed by `git +rebase --autosquash`, it replaces the log message of `<commit>` +without making any other changes. ++ +Neither "fixup!" nor "amend!" commits change authorship of +`<commit>` when applied by `git rebase --autosquash`. +See linkgit:git-rebase[1] for details. --squash=<commit>:: Construct a commit message for use with `rebase --autosquash`. 
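Editor's illustration (not part of the patch): the `--fixup=[(amend|reword):]<commit>` documentation above revolves around the subject-line prefix that `git rebase --autosquash` later matches on. Below is a minimal, self-contained C sketch of that construction; `autosquash_subject` is a hypothetical helper invented here for illustration, while git itself builds the prefix in the builtin/commit.c hunks later in this diff via xstrfmt("%s! %%s\n\n", fixup_prefix) and format_commit_message().

----
#include <stdio.h>
#include <string.h>

/*
 * Build the subject line of an autosquash commit: "fixup! <subject>"
 * for plain --fixup=<commit>, and "amend! <subject>" for both
 * --fixup=amend:<commit> and --fixup=reword:<commit>.
 * Illustration only; not git's own code.
 */
static void autosquash_subject(char *out, size_t outlen,
			       const char *suboption, /* NULL, "amend" or "reword" */
			       const char *target_subject)
{
	const char *prefix = "fixup";

	if (suboption && (!strcmp(suboption, "amend") ||
			  !strcmp(suboption, "reword")))
		prefix = "amend"; /* both suboptions create an "amend!" commit */

	snprintf(out, outlen, "%s! %s", prefix, target_subject);
}

int main(void)
{
	char subject[256];

	autosquash_subject(subject, sizeof(subject), "reword", "Add makefile");
	puts(subject); /* prints: amend! Add makefile */
	return 0;
}
----

When `git rebase --autosquash` later finds a commit whose subject starts with "amend!" and matches an earlier commit, it changes the moved commit's action to `fixup -C`, as described in the git-rebase.txt hunk below.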
diff --git a/Documentation/git-format-patch.txt b/Documentation/git-format-patch.txt index 3e49bf2210..911da181a1 100644 --- a/Documentation/git-format-patch.txt +++ b/Documentation/git-format-patch.txt @@ -36,11 +36,28 @@ SYNOPSIS DESCRIPTION ----------- -Prepare each commit with its patch in -one file per commit, formatted to resemble UNIX mailbox format. +Prepare each commit with its "patch" in +one "message" per commit, formatted to resemble a UNIX mailbox. The output of this command is convenient for e-mail submission or for use with 'git am'. +A "message" generated by the command consists of three parts: + +* A brief metadata header that begins with `From <commit>` + with a fixed `Mon Sep 17 00:00:00 2001` datestamp to help programs + like "file(1)" to recognize that the file is an output from this + command, fields that record the author identity, the author date, + and the title of the change (taken from the first paragraph of the + commit log message). + +* The second and subsequent paragraphs of the commit log message. + +* The "patch", which is the "diff -p --stat" output (see + linkgit:git-diff[1]) between the commit and its parent. + +The log message and the patch is separated by a line with a +three-dash line. + There are two ways to specify which commits to operate on. 1. A single commit, <since>, specifies that the commits leading @@ -221,6 +238,11 @@ populated with placeholder text. `--subject-prefix` option) has ` v<n>` appended to it. E.g. `--reroll-count=4` may produce `v4-0001-add-makefile.patch` file that has "Subject: [PATCH v4 1/20] Add makefile" in it. + `<n>` does not have to be an integer (e.g. "--reroll-count=4.4", + or "--reroll-count=4rev2" are allowed), but the downside of + using such a reroll-count is that the range-diff/interdiff + with the previous version does not state exactly which + version the new interation is compared against. --to=<email>:: Add a `To:` header to the email headers. This is in addition diff --git a/Documentation/git-pack-objects.txt b/Documentation/git-pack-objects.txt index f85cb7ea93..25d9fbe37a 100644 --- a/Documentation/git-pack-objects.txt +++ b/Documentation/git-pack-objects.txt @@ -85,6 +85,16 @@ base-name:: reference was included in the resulting packfile. This can be useful to send new tags to native Git clients. +--stdin-packs:: + Read the basenames of packfiles (e.g., `pack-1234abcd.pack`) + from the standard input, instead of object names or revision + arguments. The resulting pack contains all objects listed in the + included packs (those not beginning with `^`), excluding any + objects listed in the excluded packs (beginning with `^`). ++ +Incompatible with `--revs`, or options that imply `--revs` (such as +`--all`), with the exception of `--unpacked`, which is compatible. + --window=<n>:: --depth=<n>:: These two options affect how the objects contained in diff --git a/Documentation/git-push.txt b/Documentation/git-push.txt index ab103c82cf..a953c7c387 100644 --- a/Documentation/git-push.txt +++ b/Documentation/git-push.txt @@ -600,7 +600,7 @@ EXAMPLES `git push origin`:: Without additional configuration, pushes the current branch to - the configured upstream (`remote.origin.merge` configuration + the configured upstream (`branch.<name>.merge` configuration variable) if it has the same name as the current branch, and errors out without pushing otherwise. 
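Editor's illustration (not part of the patch): the `--stdin-packs` input format documented above is line-oriented, with a leading `^` marking packs whose objects should be excluded. The following self-contained C sketch parses that input; it is a simplified stand-in, not git's implementation, which is the `read_packs_list_from_stdin()` function added in the builtin/pack-objects.c hunks later in this diff.

----
#include <stdio.h>
#include <string.h>

/*
 * Read pack basenames (e.g. "pack-1234abcd.pack") from stdin.
 * Names prefixed with '^' mark packs whose objects are excluded;
 * all other names mark packs whose objects are included.
 * Illustration only; the real code collects the names into
 * string_lists and marks excluded packs as kept in-core.
 */
int main(void)
{
	char line[4096];

	while (fgets(line, sizeof(line), stdin)) {
		size_t len = strcspn(line, "\r\n");

		line[len] = '\0';
		if (!len)
			continue; /* skip blank lines, as the real parser does */

		if (line[0] == '^')
			printf("exclude objects from pack: %s\n", line + 1);
		else
			printf("include objects from pack: %s\n", line);
	}
	return 0;
}
----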
+ diff --git a/Documentation/git-rebase.txt b/Documentation/git-rebase.txt index a0487b5cc5..f08ae27e2a 100644 --- a/Documentation/git-rebase.txt +++ b/Documentation/git-rebase.txt @@ -593,16 +593,17 @@ See also INCOMPATIBLE OPTIONS below. --autosquash:: --no-autosquash:: - When the commit log message begins with "squash! ..." (or - "fixup! ..."), and there is already a commit in the todo list that - matches the same `...`, automatically modify the todo list of rebase - -i so that the commit marked for squashing comes right after the - commit to be modified, and change the action of the moved commit - from `pick` to `squash` (or `fixup`). A commit matches the `...` if - the commit subject matches, or if the `...` refers to the commit's - hash. As a fall-back, partial matches of the commit subject work, - too. The recommended way to create fixup/squash commits is by using - the `--fixup`/`--squash` options of linkgit:git-commit[1]. + When the commit log message begins with "squash! ..." or "fixup! ..." + or "amend! ...", and there is already a commit in the todo list that + matches the same `...`, automatically modify the todo list of + `rebase -i`, so that the commit marked for squashing comes right after + the commit to be modified, and change the action of the moved commit + from `pick` to `squash` or `fixup` or `fixup -C` respectively. A commit + matches the `...` if the commit subject matches, or if the `...` refers + to the commit's hash. As a fall-back, partial matches of the commit + subject work, too. The recommended way to create fixup/amend/squash + commits is by using the `--fixup`, `--fixup=amend:` or `--fixup=reword:` + and `--squash` options respectively of linkgit:git-commit[1]. + If the `--autosquash` option is enabled by default using the configuration variable `rebase.autoSquash`, this option can be @@ -887,9 +888,17 @@ If you want to fold two or more commits into one, replace the command "pick" for the second and subsequent commits with "squash" or "fixup". If the commits had different authors, the folded commit will be attributed to the author of the first commit. The suggested commit -message for the folded commit is the concatenation of the commit -messages of the first commit and of those with the "squash" command, -but omits the commit messages of commits with the "fixup" command. +message for the folded commit is the concatenation of the first +commit's message with those identified by "squash" commands, omitting the +messages of commits identified by "fixup" commands, unless "fixup -c" +is used. In that case the suggested commit message is only the message +of the "fixup -c" commit, and an editor is opened allowing you to edit +the message. The contents (patch) of the "fixup -c" commit are still +incorporated into the folded commit. If there is more than one "fixup -c" +commit, the message from the final one is used. You can also use +"fixup -C" to get the same behavior as "fixup -c" except without opening +an editor. + 'git rebase' will stop when "pick" has been replaced with "edit" or when a command fails due to merge errors. When you are done editing diff --git a/Documentation/git-repack.txt b/Documentation/git-repack.txt index fbd4b4ae06..317d63cf0d 100644 --- a/Documentation/git-repack.txt +++ b/Documentation/git-repack.txt @@ -165,6 +165,29 @@ depth is 4095. Pass the `--delta-islands` option to `git-pack-objects`, see linkgit:git-pack-objects[1]. 
+-g=<factor>:: +--geometric=<factor>:: + Arrange resulting pack structure so that each successive pack + contains at least `<factor>` times the number of objects as the + next-largest pack. ++ +`git repack` ensures this by determining a "cut" of packfiles that need +to be repacked into one in order to ensure a geometric progression. It +picks the smallest set of packfiles such that as many of the larger +packfiles (by count of objects contained in that pack) may be left +intact. ++ +Unlike other repack modes, the set of objects to pack is determined +uniquely by the set of packs being "rolled-up"; in other words, the +packs determined to need to be combined in order to restore a geometric +progression. ++ +When `--unpacked` is specified, loose objects are implicitly included in +this "roll-up", without respect to their reachability. This is subject +to change in the future. This option (implying a drastically different +repack mode) is not guaranteed to work with all other combinations of +option to `git repack`). + CONFIGURATION ------------- diff --git a/Documentation/technical/api-simple-ipc.txt b/Documentation/technical/api-simple-ipc.txt new file mode 100644 index 0000000000..d79ad323e6 --- /dev/null +++ b/Documentation/technical/api-simple-ipc.txt @@ -0,0 +1,105 @@ +Simple-IPC API +============== + +The Simple-IPC API is a collection of `ipc_` prefixed library routines +and a basic communication protocol that allow an IPC-client process to +send an application-specific IPC-request message to an IPC-server +process and receive an application-specific IPC-response message. + +Communication occurs over a named pipe on Windows and a Unix domain +socket on other platforms. IPC-clients and IPC-servers rendezvous at +a previously agreed-to application-specific pathname (which is outside +the scope of this design) that is local to the computer system. + +The IPC-server routines within the server application process create a +thread pool to listen for connections and receive request messages +from multiple concurrent IPC-clients. When received, these messages +are dispatched up to the server application callbacks for handling. +IPC-server routines then incrementally relay responses back to the +IPC-client. + +The IPC-client routines within a client application process connect +to the IPC-server and send a request message and wait for a response. +When received, the response is returned back the caller. + +For example, the `fsmonitor--daemon` feature will be built as a server +application on top of the IPC-server library routines. It will have +threads watching for file system events and a thread pool waiting for +client connections. Clients, such as `git status` will request a list +of file system events since a point in time and the server will +respond with a list of changed files and directories. The formats of +the request and response are application-specific; the IPC-client and +IPC-server routines treat them as opaque byte streams. + + +Comparison with sub-process model +--------------------------------- + +The Simple-IPC mechanism differs from the existing `sub-process.c` +model (Documentation/technical/long-running-process-protocol.txt) and +used by applications like Git-LFS. In the LFS-style sub-process model +the helper is started by the foreground process, communication happens +via a pair of file descriptors bound to the stdin/stdout of the +sub-process, the sub-process only serves the current foreground +process, and the sub-process exits when the foreground process +terminates. 
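Editor's illustration (not part of the patch; the Simple-IPC description resumes below): the `--geometric=<factor>` documentation quoted earlier in this hunk describes picking the smallest "cut" of small packs to roll up so that the surviving packs grow by at least the given factor. The C sketch below is a naive rendering of that documented invariant; `geometric_cut` and `keeps_progression` are hypothetical helpers, and git's own selection logic (in builtin/repack.c, not among the hunks quoted here) handles additional cases.

----
#include <stdint.h>
#include <stdio.h>

/* Does the sequence first, rest[0], ..., rest[nr-1] grow by >= factor each step? */
static int keeps_progression(uint64_t first, const uint64_t *rest,
			     size_t nr, unsigned factor)
{
	uint64_t prev = first;
	size_t i;

	for (i = 0; i < nr; i++) {
		if (rest[i] < (uint64_t)factor * prev)
			return 0;
		prev = rest[i];
	}
	return 1;
}

/*
 * Given pack object counts sorted ascending, return how many of the
 * smallest packs must be rolled up into one new pack so that the
 * surviving packs (including the rolled-up one) form a geometric
 * progression with the given factor.  Naive O(n^2) illustration.
 */
static size_t geometric_cut(const uint64_t *counts, size_t nr, unsigned factor)
{
	uint64_t rolled = 0; /* objects that would land in the new pack */
	size_t cut;

	for (cut = 0; cut < nr; cut++) {
		if (keeps_progression(rolled, counts + cut, nr - cut, factor))
			return cut;
		rolled += counts[cut];
	}
	return nr; /* no cut works: repack everything into one pack */
}

int main(void)
{
	const uint64_t counts[] = { 4, 5, 100, 1000 };

	/* prints 2: rolling up 4+5=9 objects leaves 9, 100, 1000 with factor 2 */
	printf("roll up the %zu smallest packs\n", geometric_cut(counts, 4, 2));
	return 0;
}
----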
+ +In the Simple-IPC model the server is a very long-running service. It +can service many clients at the same time and has a private socket or +named pipe connection to each active client. It might be started +(on-demand) by the current client process or it might have been +started by a previous client or by the OS at boot time. The server +process is not associated with a terminal and it persists after +clients terminate. Clients do not have access to the stdin/stdout of +the server process and therefore must communicate over sockets or +named pipes. + + +Server startup and shutdown +--------------------------- + +How an application server based upon IPC-server is started is also +outside the scope of the Simple-IPC design and is a property of the +application using it. For example, the server might be started or +restarted during routine maintenance operations, or it might be +started as a system service during the system boot-up sequence, or it +might be started on-demand by a foreground Git command when needed. + +Similarly, server shutdown is a property of the application using +the simple-ipc routines. For example, the server might decide to +shutdown when idle or only upon explicit request. + + +Simple-IPC protocol +------------------- + +The Simple-IPC protocol consists of a single request message from the +client and an optional response message from the server. Both the +client and server messages are unlimited in length and are terminated +with a flush packet. + +The pkt-line routines (Documentation/technical/protocol-common.txt) +are used to simplify buffer management during message generation, +transmission, and reception. A flush packet is used to mark the end +of the message. This allows the sender to incrementally generate and +transmit the message. It allows the receiver to incrementally receive +the message in chunks and to know when they have received the entire +message. + +The actual byte format of the client request and server response +messages are application specific. The IPC layer transmits and +receives them as opaque byte buffers without any concern for the +content within. It is the job of the calling application layer to +understand the contents of the request and response messages. + + +Summary +------- + +Conceptually, the Simple-IPC protocol is similar to an HTTP REST +request. Clients connect, make an application-specific and +stateless request, receive an application-specific +response, and disconnect. It is a one round trip facility for +querying the server. The Simple-IPC routines hide the socket, +named pipe, and thread pool details and allow the application +layer to focus on the application at hand. @@ -197,7 +197,9 @@ Issues of note: Building and installing the pdf file additionally requires dblatex. Version >= 0.2.7 is known to work. - All formats require at least asciidoc 8.4.1. + All formats require at least asciidoc 8.4.1. Alternatively, you can + use Asciidoctor (requires Ruby) by passing USE_ASCIIDOCTOR=YesPlease + to make. You need at least Asciidoctor version 1.5. 
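Editor's illustration (not part of the patch): the api-simple-ipc.txt document above (its text ends just before the INSTALL hunk that follows) says request and response messages are framed with pkt-lines and terminated by a flush packet. The sketch below frames a message that way using plain stdio rather than git's pkt-line API; `write_ipc_message` is a hypothetical helper, and the 65516-byte payload limit is an assumption taken from the pkt-line description in protocol-common.txt.

----
#include <stdio.h>
#include <string.h>

#define PKT_PAYLOAD_MAX 65516 /* assumed per-packet payload limit */

/*
 * Frame an application message the way Simple-IPC transmits it:
 * split it into pkt-lines (a "%04x" length prefix that counts the
 * 4 prefix bytes plus the payload) and terminate with the flush
 * packet "0000".  Illustration only; git uses its pkt-line routines.
 */
static void write_ipc_message(FILE *out, const char *msg, size_t len)
{
	while (len) {
		size_t chunk = len < PKT_PAYLOAD_MAX ? len : PKT_PAYLOAD_MAX;

		fprintf(out, "%04x", (unsigned)(chunk + 4));
		fwrite(msg, 1, chunk, out);
		msg += chunk;
		len -= chunk;
	}
	fputs("0000", out); /* flush packet: end of message */
}

int main(void)
{
	const char *request = "query-changes since:12345"; /* made-up payload */

	write_ipc_message(stdout, request, strlen(request));
	return 0;
}
----

A hypothetical fsmonitor-style client would send one such request over the agreed-upon socket or named pipe and read back a response framed the same way.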
There are also "make quick-install-doc", "make quick-install-man" and "make quick-install-html" which install preformatted man pages @@ -578,7 +578,9 @@ GENERATED_H = EXTRA_CPPFLAGS = FUZZ_OBJS = FUZZ_PROGRAMS = +GIT_OBJS = LIB_OBJS = +OBJECTS = PROGRAM_OBJS = PROGRAMS = EXCLUDED_PROGRAMS = @@ -587,6 +589,7 @@ SCRIPT_PYTHON = SCRIPT_SH = SCRIPT_LIB = TEST_BUILTINS_OBJS = +TEST_OBJS = TEST_PROGRAMS_NEED_X = THIRD_PARTY_SOURCES = @@ -662,6 +665,8 @@ ETAGS_TARGET = TAGS FUZZ_OBJS += fuzz-commit-graph.o FUZZ_OBJS += fuzz-pack-headers.o FUZZ_OBJS += fuzz-pack-idx.o +.PHONY: fuzz-objs +fuzz-objs: $(FUZZ_OBJS) # Always build fuzz objects even if not testing, to prevent bit-rot. all:: $(FUZZ_OBJS) @@ -679,6 +684,8 @@ PROGRAM_OBJS += http-backend.o PROGRAM_OBJS += imap-send.o PROGRAM_OBJS += sh-i18n--envsubst.o PROGRAM_OBJS += shell.o +.PHONY: program-objs +program-objs: $(PROGRAM_OBJS) # Binary suffix, set to .exe for Windows builds X = @@ -737,6 +744,7 @@ TEST_BUILTINS_OBJS += test-serve-v2.o TEST_BUILTINS_OBJS += test-sha1.o TEST_BUILTINS_OBJS += test-sha256.o TEST_BUILTINS_OBJS += test-sigchain.o +TEST_BUILTINS_OBJS += test-simple-ipc.o TEST_BUILTINS_OBJS += test-strcmp-offset.o TEST_BUILTINS_OBJS += test-string-list.o TEST_BUILTINS_OBJS += test-submodule-config.o @@ -1672,6 +1680,14 @@ ifdef NO_UNIX_SOCKETS BASIC_CFLAGS += -DNO_UNIX_SOCKETS else LIB_OBJS += unix-socket.o + LIB_OBJS += unix-stream-server.o + LIB_OBJS += compat/simple-ipc/ipc-shared.o + LIB_OBJS += compat/simple-ipc/ipc-unix-socket.o +endif + +ifdef USE_WIN32_IPC + LIB_OBJS += compat/simple-ipc/ipc-shared.o + LIB_OBJS += compat/simple-ipc/ipc-win32.o endif ifdef NO_ICONV @@ -2378,16 +2394,30 @@ XDIFF_OBJS += xdiff/xmerge.o XDIFF_OBJS += xdiff/xpatience.o XDIFF_OBJS += xdiff/xprepare.o XDIFF_OBJS += xdiff/xutils.o +.PHONY: xdiff-objs +xdiff-objs: $(XDIFF_OBJS) TEST_OBJS := $(patsubst %$X,%.o,$(TEST_PROGRAMS)) $(patsubst %,t/helper/%,$(TEST_BUILTINS_OBJS)) -OBJECTS := $(LIB_OBJS) $(BUILTIN_OBJS) $(PROGRAM_OBJS) $(TEST_OBJS) \ - $(XDIFF_OBJS) \ - $(FUZZ_OBJS) \ - common-main.o \ - git.o +.PHONY: test-objs +test-objs: $(TEST_OBJS) + +GIT_OBJS += $(LIB_OBJS) +GIT_OBJS += $(BUILTIN_OBJS) +GIT_OBJS += common-main.o +GIT_OBJS += git.o +.PHONY: git-objs +git-objs: $(GIT_OBJS) + +OBJECTS += $(GIT_OBJS) +OBJECTS += $(PROGRAM_OBJS) +OBJECTS += $(TEST_OBJS) +OBJECTS += $(XDIFF_OBJS) +OBJECTS += $(FUZZ_OBJS) ifndef NO_CURL OBJECTS += http.o http-walker.o remote-curl.o endif +.PHONY: objects +objects: $(OBJECTS) dep_files := $(foreach f,$(OBJECTS),$(dir $f).depend/$(notdir $f).d) dep_dirs := $(addsuffix .depend,$(sort $(dir $(OBJECTS)))) @@ -21,6 +21,7 @@ #include "quote.h" #include "rerere.h" #include "apply.h" +#include "entry.h" struct gitdiff_data { struct strbuf *root; @@ -104,7 +104,6 @@ struct directory { struct object_id oid; int baselen, len; unsigned mode; - int stage; char path[FLEX_ARRAY]; }; @@ -135,7 +134,7 @@ static int check_attr_export_subst(const struct attr_check *check) } static int write_archive_entry(const struct object_id *oid, const char *base, - int baselen, const char *filename, unsigned mode, int stage, + int baselen, const char *filename, unsigned mode, void *context) { static struct strbuf path = STRBUF_INIT; @@ -194,7 +193,7 @@ static int write_archive_entry(const struct object_id *oid, const char *base, static void queue_directory(const unsigned char *sha1, struct strbuf *base, const char *filename, - unsigned mode, int stage, struct archiver_context *c) + unsigned mode, struct archiver_context *c) { struct 
directory *d; size_t len = st_add4(base->len, 1, strlen(filename), 1); @@ -202,7 +201,6 @@ static void queue_directory(const unsigned char *sha1, d->up = c->bottom; d->baselen = base->len; d->mode = mode; - d->stage = stage; c->bottom = d; d->len = xsnprintf(d->path, len, "%.*s%s/", (int)base->len, base->buf, filename); hashcpy(d->oid.hash, sha1); @@ -221,14 +219,14 @@ static int write_directory(struct archiver_context *c) write_directory(c) || write_archive_entry(&d->oid, d->path, d->baselen, d->path + d->baselen, d->mode, - d->stage, c) != READ_TREE_RECURSIVE; + c) != READ_TREE_RECURSIVE; free(d); return ret ? -1 : 0; } static int queue_or_write_archive_entry(const struct object_id *oid, struct strbuf *base, const char *filename, - unsigned mode, int stage, void *context) + unsigned mode, void *context) { struct archiver_context *c = context; @@ -253,14 +251,14 @@ static int queue_or_write_archive_entry(const struct object_id *oid, if (check_attr_export_ignore(check)) return 0; queue_directory(oid->hash, base, filename, - mode, stage, c); + mode, c); return READ_TREE_RECURSIVE; } if (write_directory(c)) return -1; return write_archive_entry(oid, base->buf, base->len, filename, mode, - stage, context); + context); } struct extra_file_info { @@ -313,10 +311,10 @@ int write_archive_entries(struct archiver_args *args, git_attr_set_direction(GIT_ATTR_INDEX); } - err = read_tree_recursive(args->repo, args->tree, "", - 0, 0, &args->pathspec, - queue_or_write_archive_entry, - &context); + err = read_tree(args->repo, args->tree, + &args->pathspec, + queue_or_write_archive_entry, + &context); if (err == READ_TREE_RECURSIVE) err = 0; while (context.bottom) { @@ -375,7 +373,7 @@ struct path_exists_context { static int reject_entry(const struct object_id *oid, struct strbuf *base, const char *filename, unsigned mode, - int stage, void *context) + void *context) { int ret = -1; struct path_exists_context *ctx = context; @@ -402,9 +400,9 @@ static int path_exists(struct archiver_args *args, const char *path) ctx.args = args; parse_pathspec(&ctx.pathspec, 0, 0, "", paths); ctx.pathspec.recursive = 1; - ret = read_tree_recursive(args->repo, args->tree, "", - 0, 0, &ctx.pathspec, - reject_entry, &ctx); + ret = read_tree(args->repo, args->tree, + &ctx.pathspec, + reject_entry, &ctx); clear_pathspec(&ctx.pathspec); return ret != 0; } diff --git a/builtin/checkout-index.c b/builtin/checkout-index.c index 023e49e271..c0bf4ac1b2 100644 --- a/builtin/checkout-index.c +++ b/builtin/checkout-index.c @@ -11,6 +11,7 @@ #include "quote.h" #include "cache-tree.h" #include "parse-options.h" +#include "entry.h" #define CHECKOUT_ALL 4 static int nul_term_line; diff --git a/builtin/checkout.c b/builtin/checkout.c index 2d6550bc3c..4c696ef480 100644 --- a/builtin/checkout.c +++ b/builtin/checkout.c @@ -26,6 +26,7 @@ #include "unpack-trees.h" #include "wt-status.h" #include "xdiff-interface.h" +#include "entry.h" static const char * const checkout_usage[] = { N_("git checkout [<options>] <branch>"), @@ -114,7 +115,7 @@ static int post_checkout_hook(struct commit *old_commit, struct commit *new_comm } static int update_some(const struct object_id *oid, struct strbuf *base, - const char *pathname, unsigned mode, int stage, void *context) + const char *pathname, unsigned mode, void *context) { int len; struct cache_entry *ce; @@ -155,8 +156,8 @@ static int update_some(const struct object_id *oid, struct strbuf *base, static int read_tree_some(struct tree *tree, const struct pathspec *pathspec) { - 
read_tree_recursive(the_repository, tree, "", 0, 0, - pathspec, update_some, NULL); + read_tree(the_repository, tree, + pathspec, update_some, NULL); /* update the index with the given tree's info * for all args, expanding wildcards, and exit @@ -322,7 +323,7 @@ static void mark_ce_for_checkout_overlay(struct cache_entry *ce, * If it comes from the tree-ish, we already know it * matches the pathspec and could just stamp * CE_MATCHED to it from update_some(). But we still - * need ps_matched and read_tree_recursive (and + * need ps_matched and read_tree (and * eventually tree_entry_interesting) cannot fill * ps_matched yet. Once it can, we can avoid calling * match_pathspec() for _all_ entries when diff --git a/builtin/clone.c b/builtin/clone.c index 51e844a2de..952fe3d8fc 100644 --- a/builtin/clone.c +++ b/builtin/clone.c @@ -964,10 +964,10 @@ int cmd_clone(int argc, const char **argv, const char *prefix) { int is_bundle = 0, is_local; const char *repo_name, *repo, *work_tree, *git_dir; - char *path, *dir, *display_repo = NULL; + char *path = NULL, *dir, *display_repo = NULL; int dest_exists, real_dest_exists = 0; const struct ref *refs, *remote_head; - const struct ref *remote_head_points_at; + struct ref *remote_head_points_at = NULL; const struct ref *our_head_points_at; struct ref *mapped_refs; const struct ref *ref; @@ -1017,9 +1017,10 @@ int cmd_clone(int argc, const char **argv, const char *prefix) repo_name = argv[0]; path = get_repo_path(repo_name, &is_bundle); - if (path) + if (path) { + FREE_AND_NULL(path); repo = absolute_pathdup(repo_name); - else if (strchr(repo_name, ':')) { + } else if (strchr(repo_name, ':')) { repo = repo_name; display_repo = transport_anonymize_url(repo); } else @@ -1393,6 +1394,11 @@ cleanup: strbuf_release(&reflog_msg); strbuf_release(&branch_top); strbuf_release(&key); + free_refs(mapped_refs); + free_refs(remote_head_points_at); + free(dir); + free(path); + UNLEAK(repo); junk_mode = JUNK_LEAVE_ALL; strvec_clear(&transport_ls_refs_options.ref_prefixes); diff --git a/builtin/commit.c b/builtin/commit.c index 4b06672bd0..55d50a8891 100644 --- a/builtin/commit.c +++ b/builtin/commit.c @@ -105,7 +105,8 @@ static const char *template_file; */ static const char *author_message, *author_message_buffer; static char *edit_message, *use_message; -static char *fixup_message, *squash_message; +static char *fixup_message, *fixup_commit, *squash_message; +static const char *fixup_prefix; static int all, also, interactive, patch_interactive, only, amend, signoff; static int edit_flag = -1; /* unspecified */ static int quiet, verbose, no_verify, allow_empty, dry_run, renew_authorship; @@ -366,7 +367,8 @@ static const char *prepare_index(const char **argv, const char *prefix, die(_("--pathspec-file-nul requires --pathspec-from-file")); } - if (!pathspec.nr && (also || (only && !amend && !allow_empty))) + if (!pathspec.nr && (also || (only && !allow_empty && + (!amend || (fixup_message && strcmp(fixup_prefix, "amend")))))) die(_("No paths with --include/--only does not make sense.")); if (read_cache_preload(&pathspec) < 0) @@ -690,6 +692,22 @@ static void adjust_comment_line_char(const struct strbuf *sb) comment_line_char = *p; } +static void prepare_amend_commit(struct commit *commit, struct strbuf *sb, + struct pretty_print_context *ctx) +{ + const char *buffer, *subject, *fmt; + + buffer = get_commit_buffer(commit, NULL); + find_commit_subject(buffer, &subject); + /* + * If we amend the 'amend!' commit then we don't want to + * duplicate the subject line. 
+ */ + fmt = starts_with(subject, "amend!") ? "%b" : "%B"; + format_commit_message(commit, fmt, sb, ctx); + unuse_commit_buffer(commit, buffer); +} + static int prepare_to_commit(const char *index_file, const char *prefix, struct commit *current_head, struct wt_status *s, @@ -754,15 +772,33 @@ static int prepare_to_commit(const char *index_file, const char *prefix, } else if (fixup_message) { struct pretty_print_context ctx = {0}; struct commit *commit; - commit = lookup_commit_reference_by_name(fixup_message); + char *fmt; + commit = lookup_commit_reference_by_name(fixup_commit); if (!commit) - die(_("could not lookup commit %s"), fixup_message); + die(_("could not lookup commit %s"), fixup_commit); ctx.output_encoding = get_commit_output_encoding(); - format_commit_message(commit, "fixup! %s\n\n", - &sb, &ctx); - if (have_option_m) - strbuf_addbuf(&sb, &message); + fmt = xstrfmt("%s! %%s\n\n", fixup_prefix); + format_commit_message(commit, fmt, &sb, &ctx); + free(fmt); hook_arg1 = "message"; + + /* + * Only `-m` commit message option is checked here, as + * it supports `--fixup` to append the commit message. + * + * The other commit message options `-c`/`-C`/`-F` are + * incompatible with all the forms of `--fixup` and + * have already errored out while parsing the `git commit` + * options. + */ + if (have_option_m && !strcmp(fixup_prefix, "fixup")) + strbuf_addbuf(&sb, &message); + + if (!strcmp(fixup_prefix, "amend")) { + if (have_option_m) + die(_("cannot combine -m with --fixup:%s"), fixup_message); + prepare_amend_commit(commit, &sb, &ctx); + } } else if (!stat(git_path_merge_msg(the_repository), &statbuf)) { size_t merge_msg_start; @@ -1173,6 +1209,19 @@ static void finalize_deferred_config(struct wt_status *s) s->ahead_behind_flags = AHEAD_BEHIND_FULL; } +static void check_fixup_reword_options(int argc, const char *argv[]) { + if (whence != FROM_COMMIT) { + if (whence == FROM_MERGE) + die(_("You are in the middle of a merge -- cannot reword.")); + else if (is_from_cherry_pick(whence)) + die(_("You are in the middle of a cherry-pick -- cannot reword.")); + } + if (argc) + die(_("cannot combine reword option of --fixup with path '%s'"), *argv); + if (patch_interactive || interactive || all || also || only) + die(_("reword option of --fixup is mutually exclusive with --patch/--interactive/--all/--include/--only")); +} + static int parse_and_validate_options(int argc, const char *argv[], const struct option *options, const char * const usage[], @@ -1191,7 +1240,7 @@ static int parse_and_validate_options(int argc, const char *argv[], if (force_author && renew_authorship) die(_("Using both --reset-author and --author does not make sense")); - if (logfile || have_option_m || use_message || fixup_message) + if (logfile || have_option_m || use_message) use_editor = 0; if (0 <= edit_flag) use_editor = edit_flag; @@ -1248,6 +1297,42 @@ static int parse_and_validate_options(int argc, const char *argv[], if (also + only + all + interactive > 1) die(_("Only one of --include/--only/--all/--interactive/--patch can be used.")); + + if (fixup_message) { + /* + * We limit --fixup's suboptions to only alpha characters. + * If the first character after a run of alpha is colon, + * then the part before the colon may be a known suboption + * name like `amend` or `reword`, or a misspelt suboption + * name. In either case, we treat it as + * --fixup=<suboption>:<arg>. + * + * Otherwise, we are dealing with --fixup=<commit>. 
+ */ + char *p = fixup_message; + while (isalpha(*p)) + p++; + if (p > fixup_message && *p == ':') { + *p = '\0'; + fixup_commit = p + 1; + if (!strcmp("amend", fixup_message) || + !strcmp("reword", fixup_message)) { + fixup_prefix = "amend"; + allow_empty = 1; + if (*fixup_message == 'r') { + check_fixup_reword_options(argc, argv); + only = 1; + } + } else { + die(_("unknown option: --fixup=%s:%s"), fixup_message, fixup_commit); + } + } else { + fixup_commit = fixup_message; + fixup_prefix = "fixup"; + use_editor = 0; + } + } + cleanup_mode = get_cleanup_mode(cleanup_arg, use_editor); handle_untracked_files_arg(s); @@ -1525,7 +1610,11 @@ int cmd_commit(int argc, const char **argv, const char *prefix) OPT_CALLBACK('m', "message", &message, N_("message"), N_("commit message"), opt_parse_m), OPT_STRING('c', "reedit-message", &edit_message, N_("commit"), N_("reuse and edit message from specified commit")), OPT_STRING('C', "reuse-message", &use_message, N_("commit"), N_("reuse message from specified commit")), - OPT_STRING(0, "fixup", &fixup_message, N_("commit"), N_("use autosquash formatted message to fixup specified commit")), + /* + * TRANSLATORS: Leave "[(amend|reword):]" as-is, + * and only translate <commit>. + */ + OPT_STRING(0, "fixup", &fixup_message, N_("[(amend|reword):]commit"), N_("use autosquash formatted message to fixup or amend/reword specified commit")), OPT_STRING(0, "squash", &squash_message, N_("commit"), N_("use autosquash formatted message to squash specified commit")), OPT_BOOL(0, "reset-author", &renew_authorship, N_("the commit is authored by me now (used with -C/-c/--amend)")), OPT_CALLBACK_F(0, "trailer", NULL, N_("trailer"), N_("add custom trailer(s)"), PARSE_OPT_NONEG, opt_pass_trailer), @@ -1685,6 +1774,19 @@ int cmd_commit(int argc, const char **argv, const char *prefix) exit(1); } + if (fixup_message && starts_with(sb.buf, "amend! 
") && + !allow_empty_message) { + struct strbuf body = STRBUF_INIT; + size_t len = commit_subject_length(sb.buf); + strbuf_addstr(&body, sb.buf + len); + if (message_is_empty(&body, cleanup_mode)) { + rollback_index_files(); + fprintf(stderr, _("Aborting commit due to empty commit message body.\n")); + exit(1); + } + strbuf_release(&body); + } + if (amend) { const char *exclude_gpgsig[3] = { "gpgsig", "gpgsig-sha256", NULL }; extra = read_commit_extra_headers(current_head, exclude_gpgsig); diff --git a/builtin/credential-cache--daemon.c b/builtin/credential-cache--daemon.c index c61f123a3b..4c6c89ab0d 100644 --- a/builtin/credential-cache--daemon.c +++ b/builtin/credential-cache--daemon.c @@ -203,9 +203,10 @@ static int serve_cache_loop(int fd) static void serve_cache(const char *socket_path, int debug) { + struct unix_stream_listen_opts opts = UNIX_STREAM_LISTEN_OPTS_INIT; int fd; - fd = unix_stream_listen(socket_path); + fd = unix_stream_listen(socket_path, &opts); if (fd < 0) die_errno("unable to bind to '%s'", socket_path); diff --git a/builtin/credential-cache.c b/builtin/credential-cache.c index 9b3f709905..76a6ba3722 100644 --- a/builtin/credential-cache.c +++ b/builtin/credential-cache.c @@ -14,7 +14,7 @@ static int send_request(const char *socket, const struct strbuf *out) { int got_data = 0; - int fd = unix_stream_connect(socket); + int fd = unix_stream_connect(socket, 0); if (fd < 0) return -1; diff --git a/builtin/difftool.c b/builtin/difftool.c index 6e18e623fd..ef25729d49 100644 --- a/builtin/difftool.c +++ b/builtin/difftool.c @@ -23,6 +23,7 @@ #include "lockfile.h" #include "object-store.h" #include "dir.h" +#include "entry.h" static int trust_exit_code; diff --git a/builtin/init-db.c b/builtin/init-db.c index f82efe4aff..c19b35f1e6 100644 --- a/builtin/init-db.c +++ b/builtin/init-db.c @@ -25,7 +25,6 @@ static int init_is_bare_repository = 0; static int init_shared_repository = -1; -static const char *init_db_template_dir; static void copy_templates_1(struct strbuf *path, struct strbuf *template_path, DIR *dir) @@ -94,7 +93,7 @@ static void copy_templates_1(struct strbuf *path, struct strbuf *template_path, } } -static void copy_templates(const char *template_dir) +static void copy_templates(const char *template_dir, const char *init_template_dir) { struct strbuf path = STRBUF_INIT; struct strbuf template_path = STRBUF_INIT; @@ -107,7 +106,7 @@ static void copy_templates(const char *template_dir) if (!template_dir) template_dir = getenv(TEMPLATE_DIR_ENVIRONMENT); if (!template_dir) - template_dir = init_db_template_dir; + template_dir = init_template_dir; if (!template_dir) template_dir = to_free = system_path(DEFAULT_GIT_TEMPLATE_DIR); if (!template_dir[0]) { @@ -154,17 +153,6 @@ free_return: clear_repository_format(&template_format); } -static int git_init_db_config(const char *k, const char *v, void *cb) -{ - if (!strcmp(k, "init.templatedir")) - return git_config_pathname(&init_db_template_dir, k, v); - - if (starts_with(k, "core.")) - return platform_core_config(k, v, cb); - - return 0; -} - /* * If the git_dir is not directly inside the working tree, then git will not * find it by default, and we need to set the worktree explicitly. 
@@ -212,12 +200,9 @@ static int create_default_files(const char *template_path, int reinit; int filemode; struct strbuf err = STRBUF_INIT; + const char *init_template_dir = NULL; const char *work_tree = get_git_work_tree(); - /* Just look for `init.templatedir` */ - init_db_template_dir = NULL; /* re-set in case it was set before */ - git_config(git_init_db_config, NULL); - /* * First copy the templates -- we might have the default * config file there, in which case we would want to read @@ -227,7 +212,8 @@ static int create_default_files(const char *template_path, * values (since we've just potentially changed what's available on * disk). */ - copy_templates(template_path); + git_config_get_value("init.templatedir", &init_template_dir); + copy_templates(template_path, init_template_dir); git_config_clear(); reset_shared_repository(); git_config(git_default_config, NULL); @@ -422,8 +408,8 @@ int init_db(const char *git_dir, const char *real_git_dir, } startup_info->have_repository = 1; - /* Just look for `core.hidedotfiles` */ - git_config(git_init_db_config, NULL); + /* Ensure `core.hidedotfiles` is processed */ + git_config(platform_core_config, NULL); safe_create_dir(git_dir, 0); @@ -575,8 +561,10 @@ int cmd_init_db(int argc, const char **argv, const char *prefix) if (real_git_dir && !is_absolute_path(real_git_dir)) real_git_dir = real_pathdup(real_git_dir, 1); - if (template_dir && *template_dir && !is_absolute_path(template_dir)) + if (template_dir && *template_dir && !is_absolute_path(template_dir)) { template_dir = absolute_pathdup(template_dir); + UNLEAK(template_dir); + } if (argc == 1) { int mkdir_tried = 0; diff --git a/builtin/log.c b/builtin/log.c index f67b67d80e..8acd285daf 100644 --- a/builtin/log.c +++ b/builtin/log.c @@ -599,7 +599,7 @@ static int show_tag_object(const struct object_id *oid, struct rev_info *rev) static int show_tree_object(const struct object_id *oid, struct strbuf *base, - const char *pathname, unsigned mode, int stage, void *context) + const char *pathname, unsigned mode, void *context) { FILE *file = context; fprintf(file, "%s%s\n", pathname, S_ISDIR(mode) ? 
"/" : ""); @@ -681,9 +681,9 @@ int cmd_show(int argc, const char **argv, const char *prefix) diff_get_color_opt(&rev.diffopt, DIFF_COMMIT), name, diff_get_color_opt(&rev.diffopt, DIFF_RESET)); - read_tree_recursive(the_repository, (struct tree *)o, "", - 0, 0, &match_all, show_tree_object, - rev.diffopt.file); + read_tree(the_repository, (struct tree *)o, + &match_all, show_tree_object, + rev.diffopt.file); rev.shown_one = 1; break; case OBJ_COMMIT: @@ -1662,13 +1662,19 @@ static void print_bases(struct base_tree_info *bases, FILE *file) oidclr(&bases->base_commit); } -static const char *diff_title(struct strbuf *sb, int reroll_count, - const char *generic, const char *rerolled) +static const char *diff_title(struct strbuf *sb, + const char *reroll_count, + const char *generic, + const char *rerolled) { - if (reroll_count <= 0) + int v; + + /* RFC may be v0, so allow -v1 to diff against v0 */ + if (reroll_count && !strtol_i(reroll_count, 10, &v) && + v >= 1) + strbuf_addf(sb, rerolled, v - 1); + else strbuf_addstr(sb, generic); - else /* RFC may be v0, so allow -v1 to diff against v0 */ - strbuf_addf(sb, rerolled, reroll_count - 1); return sb->buf; } @@ -1717,7 +1723,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix) struct strbuf buf = STRBUF_INIT; int use_patch_format = 0; int quiet = 0; - int reroll_count = -1; + const char *reroll_count = NULL; char *cover_from_description_arg = NULL; char *branch_name = NULL; char *base_commit = NULL; @@ -1751,7 +1757,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix) N_("use <sfx> instead of '.patch'")), OPT_INTEGER(0, "start-number", &start_number, N_("start numbering patches at <n> instead of 1")), - OPT_INTEGER('v', "reroll-count", &reroll_count, + OPT_STRING('v', "reroll-count", &reroll_count, N_("reroll-count"), N_("mark the series as Nth re-roll")), OPT_INTEGER(0, "filename-max-length", &fmt_patch_name_max, N_("max length of output filename")), @@ -1862,9 +1868,10 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix) if (cover_from_description_arg) cover_from_description_mode = parse_cover_from_description(cover_from_description_arg); - if (0 < reroll_count) { + if (reroll_count) { struct strbuf sprefix = STRBUF_INIT; - strbuf_addf(&sprefix, "%s v%d", + + strbuf_addf(&sprefix, "%s v%s", rev.subject_prefix, reroll_count); rev.reroll_count = reroll_count; rev.subject_prefix = strbuf_detach(&sprefix, NULL); diff --git a/builtin/ls-files.c b/builtin/ls-files.c index f6f9e483b2..60a2913a01 100644 --- a/builtin/ls-files.c +++ b/builtin/ls-files.c @@ -12,6 +12,7 @@ #include "dir.h" #include "builtin.h" #include "tree.h" +#include "cache-tree.h" #include "parse-options.h" #include "resolve-undo.h" #include "string-list.h" @@ -420,6 +421,53 @@ static int get_common_prefix_len(const char *common_prefix) return common_prefix_len; } +static int read_one_entry_opt(struct index_state *istate, + const struct object_id *oid, + struct strbuf *base, + const char *pathname, + unsigned mode, int opt) +{ + int len; + struct cache_entry *ce; + + if (S_ISDIR(mode)) + return READ_TREE_RECURSIVE; + + len = strlen(pathname); + ce = make_empty_cache_entry(istate, base->len + len); + + ce->ce_mode = create_ce_mode(mode); + ce->ce_flags = create_ce_flags(1); + ce->ce_namelen = base->len + len; + memcpy(ce->name, base->buf, base->len); + memcpy(ce->name + base->len, pathname, len+1); + oidcpy(&ce->oid, oid); + return add_index_entry(istate, ce, opt); +} + +static int read_one_entry(const struct object_id 
*oid, struct strbuf *base, + const char *pathname, unsigned mode, + void *context) +{ + struct index_state *istate = context; + return read_one_entry_opt(istate, oid, base, pathname, + mode, + ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK); +} + +/* + * This is used when the caller knows there is no existing entries at + * the stage that will conflict with the entry being added. + */ +static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base, + const char *pathname, unsigned mode, + void *context) +{ + struct index_state *istate = context; + return read_one_entry_opt(istate, oid, base, pathname, + mode, ADD_CACHE_JUST_APPEND); +} + /* * Read the tree specified with --with-tree option * (typically, HEAD) into stage #1 and then @@ -436,6 +484,8 @@ void overlay_tree_on_index(struct index_state *istate, struct pathspec pathspec; struct cache_entry *last_stage0 = NULL; int i; + read_tree_fn_t fn = NULL; + int err; if (get_oid(tree_name, &oid)) die("tree-ish %s not found.", tree_name); @@ -458,9 +508,32 @@ void overlay_tree_on_index(struct index_state *istate, PATHSPEC_PREFER_CWD, prefix, matchbuf); } else memset(&pathspec, 0, sizeof(pathspec)); - if (read_tree(the_repository, tree, 1, &pathspec, istate)) + + /* + * See if we have cache entry at the stage. If so, + * do it the original slow way, otherwise, append and then + * sort at the end. + */ + for (i = 0; !fn && i < istate->cache_nr; i++) { + const struct cache_entry *ce = istate->cache[i]; + if (ce_stage(ce) == 1) + fn = read_one_entry; + } + + if (!fn) + fn = read_one_entry_quick; + err = read_tree(the_repository, tree, &pathspec, fn, istate); + if (err) die("unable to read tree entries %s", tree_name); + /* + * Sort the cache entry -- we need to nuke the cache tree, though. + */ + if (fn == read_one_entry_quick) { + cache_tree_free(&istate->cache_tree); + QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare); + } + for (i = 0; i < istate->cache_nr; i++) { struct cache_entry *ce = istate->cache[i]; switch (ce_stage(ce)) { diff --git a/builtin/ls-remote.c b/builtin/ls-remote.c index abfa984737..1794548c71 100644 --- a/builtin/ls-remote.c +++ b/builtin/ls-remote.c @@ -124,8 +124,6 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix) int hash_algo = hash_algo_by_ptr(transport_get_hash_algo(transport)); repo_set_hash_algo(the_repository, hash_algo); } - if (transport_disconnect(transport)) - return 1; if (!dest && !quiet) fprintf(stderr, "From %s\n", *remote->url); @@ -151,5 +149,7 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix) } ref_array_clear(&ref_array); + if (transport_disconnect(transport)) + return 1; return status; } diff --git a/builtin/ls-tree.c b/builtin/ls-tree.c index 7cad3f24eb..3a442631c7 100644 --- a/builtin/ls-tree.c +++ b/builtin/ls-tree.c @@ -62,7 +62,7 @@ static int show_recursive(const char *base, int baselen, const char *pathname) } static int show_tree(const struct object_id *oid, struct strbuf *base, - const char *pathname, unsigned mode, int stage, void *context) + const char *pathname, unsigned mode, void *context) { int retval = 0; int baselen; @@ -185,6 +185,6 @@ int cmd_ls_tree(int argc, const char **argv, const char *prefix) tree = parse_tree_indirect(&oid); if (!tree) die("not a tree object"); - return !!read_tree_recursive(the_repository, tree, "", 0, 0, - &pathspec, show_tree, NULL); + return !!read_tree(the_repository, tree, + &pathspec, show_tree, NULL); } diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c index 
4bb602688c..525c2d8552 100644 --- a/builtin/pack-objects.c +++ b/builtin/pack-objects.c @@ -815,8 +815,8 @@ static struct reused_chunk { /* The offset of the first object of this chunk in the original * packfile. */ off_t original; - /* The offset of the first object of this chunk in the generated - * packfile minus "original". */ + /* The difference for "original" minus the offset of the first object of + * this chunk in the generated packfile. */ off_t difference; } *reused_chunks; static int reused_chunks_nr; @@ -1188,7 +1188,8 @@ static int have_duplicate_entry(const struct object_id *oid, return 1; } -static int want_found_object(int exclude, struct packed_git *p) +static int want_found_object(const struct object_id *oid, int exclude, + struct packed_git *p) { if (exclude) return 1; @@ -1204,27 +1205,82 @@ static int want_found_object(int exclude, struct packed_git *p) * make sure no copy of this object appears in _any_ pack that makes us * to omit the object, so we need to check all the packs. * - * We can however first check whether these options can possible matter; + * We can however first check whether these options can possibly matter; * if they do not matter we know we want the object in generated pack. * Otherwise, we signal "-1" at the end to tell the caller that we do * not know either way, and it needs to check more packs. */ - if (!ignore_packed_keep_on_disk && - !ignore_packed_keep_in_core && - (!local || !have_non_local_packs)) - return 1; + /* + * Objects in packs borrowed from elsewhere are discarded regardless of + * if they appear in other packs that weren't borrowed. + */ if (local && !p->pack_local) return 0; - if (p->pack_local && - ((ignore_packed_keep_on_disk && p->pack_keep) || - (ignore_packed_keep_in_core && p->pack_keep_in_core))) - return 0; + + /* + * Then handle .keep first, as we have a fast(er) path there. + */ + if (ignore_packed_keep_on_disk || ignore_packed_keep_in_core) { + /* + * Set the flags for the kept-pack cache to be the ones we want + * to ignore. + * + * That is, if we are ignoring objects in on-disk keep packs, + * then we want to search through the on-disk keep and ignore + * the in-core ones. + */ + unsigned flags = 0; + if (ignore_packed_keep_on_disk) + flags |= ON_DISK_KEEP_PACKS; + if (ignore_packed_keep_in_core) + flags |= IN_CORE_KEEP_PACKS; + + if (ignore_packed_keep_on_disk && p->pack_keep) + return 0; + if (ignore_packed_keep_in_core && p->pack_keep_in_core) + return 0; + if (has_object_kept_pack(oid, flags)) + return 0; + } + + /* + * At this point we know definitively that either we don't care about + * keep-packs, or the object is not in one. Keep checking other + * conditions... + */ + if (!local || !have_non_local_packs) + return 1; /* we don't know yet; keep looking for more packs */ return -1; } +static int want_object_in_pack_one(struct packed_git *p, + const struct object_id *oid, + int exclude, + struct packed_git **found_pack, + off_t *found_offset) +{ + off_t offset; + + if (p == *found_pack) + offset = *found_offset; + else + offset = find_pack_entry_one(oid->hash, p); + + if (offset) { + if (!*found_pack) { + if (!is_pack_valid(p)) + return -1; + *found_offset = offset; + *found_pack = p; + } + return want_found_object(oid, exclude, p); + } + return -1; +} + /* * Check whether we want the object in the pack (e.g., we do not want * objects found in non-local stores if the "--local" option was used). 
@@ -1252,7 +1308,7 @@ static int want_object_in_pack(const struct object_id *oid, * are present we will determine the answer right now. */ if (*found_pack) { - want = want_found_object(exclude, *found_pack); + want = want_found_object(oid, exclude, *found_pack); if (want != -1) return want; } @@ -1260,51 +1316,20 @@ static int want_object_in_pack(const struct object_id *oid, for (m = get_multi_pack_index(the_repository); m; m = m->next) { struct pack_entry e; if (fill_midx_entry(the_repository, oid, &e, m)) { - struct packed_git *p = e.p; - off_t offset; - - if (p == *found_pack) - offset = *found_offset; - else - offset = find_pack_entry_one(oid->hash, p); - - if (offset) { - if (!*found_pack) { - if (!is_pack_valid(p)) - continue; - *found_offset = offset; - *found_pack = p; - } - want = want_found_object(exclude, p); - if (want != -1) - return want; - } + want = want_object_in_pack_one(e.p, oid, exclude, found_pack, found_offset); + if (want != -1) + return want; } } list_for_each(pos, get_packed_git_mru(the_repository)) { struct packed_git *p = list_entry(pos, struct packed_git, mru); - off_t offset; - - if (p == *found_pack) - offset = *found_offset; - else - offset = find_pack_entry_one(oid->hash, p); - - if (offset) { - if (!*found_pack) { - if (!is_pack_valid(p)) - continue; - *found_offset = offset; - *found_pack = p; - } - want = want_found_object(exclude, p); - if (!exclude && want > 0) - list_move(&p->mru, - get_packed_git_mru(the_repository)); - if (want != -1) - return want; - } + want = want_object_in_pack_one(p, oid, exclude, found_pack, found_offset); + if (!exclude && want > 0) + list_move(&p->mru, + get_packed_git_mru(the_repository)); + if (want != -1) + return want; } if (uri_protocols.nr) { @@ -2986,6 +3011,191 @@ static int git_pack_config(const char *k, const char *v, void *cb) return git_default_config(k, v, cb); } +/* Counters for trace2 output when in --stdin-packs mode. */ +static int stdin_packs_found_nr; +static int stdin_packs_hints_nr; + +static int add_object_entry_from_pack(const struct object_id *oid, + struct packed_git *p, + uint32_t pos, + void *_data) +{ + struct rev_info *revs = _data; + struct object_info oi = OBJECT_INFO_INIT; + off_t ofs; + enum object_type type; + + display_progress(progress_state, ++nr_seen); + + if (have_duplicate_entry(oid, 0)) + return 0; + + ofs = nth_packed_object_offset(p, pos); + if (!want_object_in_pack(oid, 0, &p, &ofs)) + return 0; + + oi.typep = &type; + if (packed_object_info(the_repository, p, ofs, &oi) < 0) + die(_("could not get type of object %s in pack %s"), + oid_to_hex(oid), p->pack_name); + else if (type == OBJ_COMMIT) { + /* + * commits in included packs are used as starting points for the + * subsequent revision walk + */ + add_pending_oid(revs, NULL, oid, 0); + } + + stdin_packs_found_nr++; + + create_object_entry(oid, type, 0, 0, 0, p, ofs); + + return 0; +} + +static void show_commit_pack_hint(struct commit *commit, void *_data) +{ + /* nothing to do; commits don't have a namehash */ +} + +static void show_object_pack_hint(struct object *object, const char *name, + void *_data) +{ + struct object_entry *oe = packlist_find(&to_pack, &object->oid); + if (!oe) + return; + + /* + * Our 'to_pack' list was constructed by iterating all objects packed in + * included packs, and so doesn't have a non-zero hash field that you + * would typically pick up during a reachability traversal. 
+ * + * Make a best-effort attempt to fill in the ->hash and ->no_try_delta + * here using a now in order to perhaps improve the delta selection + * process. + */ + oe->hash = pack_name_hash(name); + oe->no_try_delta = name && no_try_delta(name); + + stdin_packs_hints_nr++; +} + +static int pack_mtime_cmp(const void *_a, const void *_b) +{ + struct packed_git *a = ((const struct string_list_item*)_a)->util; + struct packed_git *b = ((const struct string_list_item*)_b)->util; + + /* + * order packs by descending mtime so that objects are laid out + * roughly as newest-to-oldest + */ + if (a->mtime < b->mtime) + return 1; + else if (b->mtime < a->mtime) + return -1; + else + return 0; +} + +static void read_packs_list_from_stdin(void) +{ + struct strbuf buf = STRBUF_INIT; + struct string_list include_packs = STRING_LIST_INIT_DUP; + struct string_list exclude_packs = STRING_LIST_INIT_DUP; + struct string_list_item *item = NULL; + + struct packed_git *p; + struct rev_info revs; + + repo_init_revisions(the_repository, &revs, NULL); + /* + * Use a revision walk to fill in the namehash of objects in the include + * packs. To save time, we'll avoid traversing through objects that are + * in excluded packs. + * + * That may cause us to avoid populating all of the namehash fields of + * all included objects, but our goal is best-effort, since this is only + * an optimization during delta selection. + */ + revs.no_kept_objects = 1; + revs.keep_pack_cache_flags |= IN_CORE_KEEP_PACKS; + revs.blob_objects = 1; + revs.tree_objects = 1; + revs.tag_objects = 1; + revs.ignore_missing_links = 1; + + while (strbuf_getline(&buf, stdin) != EOF) { + if (!buf.len) + continue; + + if (*buf.buf == '^') + string_list_append(&exclude_packs, buf.buf + 1); + else + string_list_append(&include_packs, buf.buf); + + strbuf_reset(&buf); + } + + string_list_sort(&include_packs); + string_list_sort(&exclude_packs); + + for (p = get_all_packs(the_repository); p; p = p->next) { + const char *pack_name = pack_basename(p); + + item = string_list_lookup(&include_packs, pack_name); + if (!item) + item = string_list_lookup(&exclude_packs, pack_name); + + if (item) + item->util = p; + } + + /* + * First handle all of the excluded packs, marking them as kept in-core + * so that later calls to add_object_entry() discards any objects that + * are also found in excluded packs. + */ + for_each_string_list_item(item, &exclude_packs) { + struct packed_git *p = item->util; + if (!p) + die(_("could not find pack '%s'"), item->string); + p->pack_keep_in_core = 1; + } + + /* + * Order packs by ascending mtime; use QSORT directly to access the + * string_list_item's ->util pointer, which string_list_sort() does not + * provide. 
+ */ + QSORT(include_packs.items, include_packs.nr, pack_mtime_cmp); + + for_each_string_list_item(item, &include_packs) { + struct packed_git *p = item->util; + if (!p) + die(_("could not find pack '%s'"), item->string); + for_each_object_in_pack(p, + add_object_entry_from_pack, + &revs, + FOR_EACH_OBJECT_PACK_ORDER); + } + + if (prepare_revision_walk(&revs)) + die(_("revision walk setup failed")); + traverse_commit_list(&revs, + show_commit_pack_hint, + show_object_pack_hint, + NULL); + + trace2_data_intmax("pack-objects", the_repository, "stdin_packs_found", + stdin_packs_found_nr); + trace2_data_intmax("pack-objects", the_repository, "stdin_packs_hints", + stdin_packs_hints_nr); + + strbuf_release(&buf); + string_list_clear(&include_packs, 0); + string_list_clear(&exclude_packs, 0); +} + static void read_object_list_from_stdin(void) { char line[GIT_MAX_HEXSZ + 1 + PATH_MAX + 2]; @@ -3489,6 +3699,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix) struct strvec rp = STRVEC_INIT; int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0; int rev_list_index = 0; + int stdin_packs = 0; struct string_list keep_pack_list = STRING_LIST_INIT_NODUP; struct option pack_objects_options[] = { OPT_SET_INT('q', "quiet", &progress, @@ -3539,6 +3750,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix) OPT_SET_INT_F(0, "indexed-objects", &rev_list_index, N_("include objects referred to by the index"), 1, PARSE_OPT_NONEG), + OPT_BOOL(0, "stdin-packs", &stdin_packs, + N_("read packs from stdin")), OPT_BOOL(0, "stdout", &pack_to_stdout, N_("output pack to stdout")), OPT_BOOL(0, "include-tag", &include_tag, @@ -3645,7 +3858,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix) use_internal_rev_list = 1; strvec_push(&rp, "--indexed-objects"); } - if (rev_list_unpacked) { + if (rev_list_unpacked && !stdin_packs) { use_internal_rev_list = 1; strvec_push(&rp, "--unpacked"); } @@ -3690,8 +3903,13 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix) if (filter_options.choice) { if (!pack_to_stdout) die(_("cannot use --filter without --stdout")); + if (stdin_packs) + die(_("cannot use --filter with --stdin-packs")); } + if (stdin_packs && use_internal_rev_list) + die(_("cannot use internal rev list with --stdin-packs")); + /* * "soft" reasons not to use bitmaps - for on-disk repack by default we want * @@ -3750,7 +3968,13 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix) if (progress) progress_state = start_progress(_("Enumerating objects"), 0); - if (!use_internal_rev_list) + if (stdin_packs) { + /* avoids adding objects in excluded packs */ + ignore_packed_keep_in_core = 1; + read_packs_list_from_stdin(); + if (rev_list_unpacked) + add_unreachable_loose_objects(); + } else if (!use_internal_rev_list) read_object_list_from_stdin(); else { get_object_list(rp.nr, rp.v); diff --git a/builtin/rebase.c b/builtin/rebase.c index de400f9a19..783b526f6e 100644 --- a/builtin/rebase.c +++ b/builtin/rebase.c @@ -100,7 +100,6 @@ struct rebase_options { char *strategy, *strategy_opts; struct strbuf git_format_patch_opt; int reschedule_failed_exec; - int use_legacy_rebase; int reapply_cherry_picks; int fork_point; }; @@ -1102,11 +1101,6 @@ static int rebase_config(const char *var, const char *value, void *data) return 0; } - if (!strcmp(var, "rebase.usebuiltin")) { - opts->use_legacy_rebase = !git_config_bool(var, value); - return 0; - } - if (!strcmp(var, "rebase.backend")) { return 
git_config_string(&opts->default_backend, var, value); } @@ -1441,11 +1435,6 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) gpg_sign = options.gpg_sign_opt ? "" : NULL; FREE_AND_NULL(options.gpg_sign_opt); - if (options.use_legacy_rebase || - !git_env_bool("GIT_TEST_REBASE_USE_BUILTIN", -1)) - warning(_("the rebase.useBuiltin support has been removed!\n" - "See its entry in 'git help config' for details.")); - strbuf_reset(&buf); strbuf_addf(&buf, "%s/applying", apply_dir()); if(file_exists(buf.buf)) diff --git a/builtin/remote.c b/builtin/remote.c index 717b662d45..7f88e6ce9d 100644 --- a/builtin/remote.c +++ b/builtin/remote.c @@ -938,9 +938,6 @@ static int get_remote_ref_states(const char *name, struct ref_states *states, int query) { - struct transport *transport; - const struct ref *remote_refs; - states->remote = remote_get(name); if (!states->remote) return error(_("No such remote: '%s'"), name); @@ -948,10 +945,12 @@ static int get_remote_ref_states(const char *name, read_branches(); if (query) { + struct transport *transport; + const struct ref *remote_refs; + transport = transport_get(states->remote, states->remote->url_nr > 0 ? states->remote->url[0] : NULL); remote_refs = transport_get_remote_refs(transport, NULL); - transport_disconnect(transport); states->queried = 1; if (query & GET_REF_STATES) @@ -960,6 +959,7 @@ static int get_remote_ref_states(const char *name, get_head_names(remote_refs, states); if (query & GET_PUSH_REF_STATES) get_push_ref_states(remote_refs, states); + transport_disconnect(transport); } else { for_each_ref(append_ref_to_tracked_list, states); string_list_sort(&states->tracked); diff --git a/builtin/repack.c b/builtin/repack.c index 01440de2d5..6ce2556c9e 100644 --- a/builtin/repack.c +++ b/builtin/repack.c @@ -297,6 +297,142 @@ static void repack_promisor_objects(const struct pack_objects_args *args, #define ALL_INTO_ONE 1 #define LOOSEN_UNREACHABLE 2 +struct pack_geometry { + struct packed_git **pack; + uint32_t pack_nr, pack_alloc; + uint32_t split; +}; + +static uint32_t geometry_pack_weight(struct packed_git *p) +{ + if (open_pack_index(p)) + die(_("cannot open index for %s"), p->pack_name); + return p->num_objects; +} + +static int geometry_cmp(const void *va, const void *vb) +{ + uint32_t aw = geometry_pack_weight(*(struct packed_git **)va), + bw = geometry_pack_weight(*(struct packed_git **)vb); + + if (aw < bw) + return -1; + if (aw > bw) + return 1; + return 0; +} + +static void init_pack_geometry(struct pack_geometry **geometry_p) +{ + struct packed_git *p; + struct pack_geometry *geometry; + + *geometry_p = xcalloc(1, sizeof(struct pack_geometry)); + geometry = *geometry_p; + + for (p = get_all_packs(the_repository); p; p = p->next) { + if (!pack_kept_objects && p->pack_keep) + continue; + + ALLOC_GROW(geometry->pack, + geometry->pack_nr + 1, + geometry->pack_alloc); + + geometry->pack[geometry->pack_nr] = p; + geometry->pack_nr++; + } + + QSORT(geometry->pack, geometry->pack_nr, geometry_cmp); +} + +static void split_pack_geometry(struct pack_geometry *geometry, int factor) +{ + uint32_t i; + uint32_t split; + off_t total_size = 0; + + if (!geometry->pack_nr) { + geometry->split = geometry->pack_nr; + return; + } + + /* + * First, count the number of packs (in descending order of size) which + * already form a geometric progression. 
+ */ + for (i = geometry->pack_nr - 1; i > 0; i--) { + struct packed_git *ours = geometry->pack[i]; + struct packed_git *prev = geometry->pack[i - 1]; + + if (unsigned_mult_overflows(factor, geometry_pack_weight(prev))) + die(_("pack %s too large to consider in geometric " + "progression"), + prev->pack_name); + + if (geometry_pack_weight(ours) < factor * geometry_pack_weight(prev)) + break; + } + + split = i; + + if (split) { + /* + * Move the split one to the right, since the top element in the + * last-compared pair can't be in the progression. Only do this + * when we split in the middle of the array (otherwise if we got + * to the end, then the split is in the right place). + */ + split++; + } + + /* + * Then, anything to the left of 'split' must be in a new pack. But, + * creating that new pack may cause packs in the heavy half to no longer + * form a geometric progression. + * + * Compute an expected size of the new pack, and then determine how many + * packs in the heavy half need to be joined into it (if any) to restore + * the geometric progression. + */ + for (i = 0; i < split; i++) { + struct packed_git *p = geometry->pack[i]; + + if (unsigned_add_overflows(total_size, geometry_pack_weight(p))) + die(_("pack %s too large to roll up"), p->pack_name); + total_size += geometry_pack_weight(p); + } + for (i = split; i < geometry->pack_nr; i++) { + struct packed_git *ours = geometry->pack[i]; + + if (unsigned_mult_overflows(factor, total_size)) + die(_("pack %s too large to roll up"), ours->pack_name); + + if (geometry_pack_weight(ours) < factor * total_size) { + if (unsigned_add_overflows(total_size, + geometry_pack_weight(ours))) + die(_("pack %s too large to roll up"), + ours->pack_name); + + split++; + total_size += geometry_pack_weight(ours); + } else + break; + } + + geometry->split = split; +} + +static void clear_pack_geometry(struct pack_geometry *geometry) +{ + if (!geometry) + return; + + free(geometry->pack); + geometry->pack_nr = 0; + geometry->pack_alloc = 0; + geometry->split = 0; +} + int cmd_repack(int argc, const char **argv, const char *prefix) { struct child_process cmd = CHILD_PROCESS_INIT; @@ -304,6 +440,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix) struct string_list names = STRING_LIST_INIT_DUP; struct string_list rollback = STRING_LIST_INIT_NODUP; struct string_list existing_packs = STRING_LIST_INIT_DUP; + struct pack_geometry *geometry = NULL; struct strbuf line = STRBUF_INIT; int i, ext, ret; FILE *out; @@ -316,6 +453,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix) struct string_list keep_pack_list = STRING_LIST_INIT_NODUP; int no_update_server_info = 0; struct pack_objects_args po_args = {NULL}; + int geometric_factor = 0; struct option builtin_repack_options[] = { OPT_BIT('a', NULL, &pack_everything, @@ -356,6 +494,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix) N_("repack objects in packs marked with .keep")), OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"), N_("do not repack this pack")), + OPT_INTEGER('g', "geometric", &geometric_factor, + N_("find a geometric progression with factor <N>")), OPT_END() }; @@ -382,6 +522,13 @@ int cmd_repack(int argc, const char **argv, const char *prefix) if (write_bitmaps && !(pack_everything & ALL_INTO_ONE)) die(_(incremental_bitmap_conflict_error)); + if (geometric_factor) { + if (pack_everything) + die(_("--geometric is incompatible with -A, -a")); + init_pack_geometry(&geometry); + split_pack_geometry(geometry, geometric_factor); + } + 
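
split_pack_geometry() above works in two passes: it first walks the packs from largest to smallest to find where the geometric progression with the requested factor breaks, then pulls additional packs into the roll-up until the next remaining pack is at least 'factor' times the combined weight of everything below the split. A standalone sketch of that computation, using plain object counts as weights and omitting the overflow checks the real code performs, might look like this:

#include <stdint.h>
#include <stdio.h>

/* illustrative only; weights are plain object counts, not packed_git structs */
static uint32_t split_point(const uint32_t *w, uint32_t nr, uint32_t factor)
{
	uint32_t i, split;
	uint64_t total = 0;

	if (!nr)
		return 0;

	/* walk from the largest pack down while the progression holds */
	for (i = nr - 1; i > 0; i--)
		if (w[i] < factor * w[i - 1])
			break;
	split = i;
	if (split)
		split++;	/* the top of the failing pair must be rolled up too */

	/* the rolled-up pack may force more packs into the roll-up */
	for (i = 0; i < split; i++)
		total += w[i];
	for (i = split; i < nr; i++) {
		if (w[i] < factor * total) {
			total += w[i];
			split++;
		} else
			break;
	}
	return split;
}

int main(void)
{
	/* packs sorted by ascending object count, factor 2; prints "split = 3" */
	uint32_t w[] = { 1, 1, 3, 10, 40 };

	printf("split = %u\n", split_point(w, 5, 2));
	return 0;
}

With a factor of 2 and weights 1, 1, 3, 10, 40, the progression check fails between the two smallest packs, and the pack of weight 3 is pulled into the roll-up as well because 3 < 2 * (1 + 1); the three packs below the split are combined into one pack of weight 5, and 5, 10, 40 is again a geometric progression. cmd_repack() then writes the names of the packs below the split to 'git pack-objects --stdin-packs' and the remaining pack names prefixed with '^' as exclusions, as a later hunk in this file shows.
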
packdir = mkpathdup("%s/pack", get_object_directory()); packtmp = mkpathdup("%s/.tmp-%d-pack", packdir, (int)getpid()); @@ -396,9 +543,21 @@ int cmd_repack(int argc, const char **argv, const char *prefix) strvec_pushf(&cmd.args, "--keep-pack=%s", keep_pack_list.items[i].string); strvec_push(&cmd.args, "--non-empty"); - strvec_push(&cmd.args, "--all"); - strvec_push(&cmd.args, "--reflog"); - strvec_push(&cmd.args, "--indexed-objects"); + if (!geometry) { + /* + * We need to grab all reachable objects, including those that + * are reachable from reflogs and the index. + * + * When repacking into a geometric progression of packs, + * however, we ask 'git pack-objects --stdin-packs', and it is + * not about packing objects based on reachability but about + * repacking all the objects in specified packs and loose ones + * (indeed, --stdin-packs is incompatible with these options). + */ + strvec_push(&cmd.args, "--all"); + strvec_push(&cmd.args, "--reflog"); + strvec_push(&cmd.args, "--indexed-objects"); + } if (has_promisor_remote()) strvec_push(&cmd.args, "--exclude-promisor-objects"); if (write_bitmaps > 0) @@ -429,17 +588,37 @@ int cmd_repack(int argc, const char **argv, const char *prefix) strvec_push(&cmd.env_array, "GIT_REF_PARANOIA=1"); } } + } else if (geometry) { + strvec_push(&cmd.args, "--stdin-packs"); + strvec_push(&cmd.args, "--unpacked"); } else { strvec_push(&cmd.args, "--unpacked"); strvec_push(&cmd.args, "--incremental"); } - cmd.no_stdin = 1; + if (geometry) + cmd.in = -1; + else + cmd.no_stdin = 1; ret = start_command(&cmd); if (ret) return ret; + if (geometry) { + FILE *in = xfdopen(cmd.in, "w"); + /* + * The resulting pack should contain all objects in packs that + * are going to be rolled up, but exclude objects in packs which + * are being left alone. 
+ */ + for (i = 0; i < geometry->split; i++) + fprintf(in, "%s\n", pack_basename(geometry->pack[i])); + for (i = geometry->split; i < geometry->pack_nr; i++) + fprintf(in, "^%s\n", pack_basename(geometry->pack[i])); + fclose(in); + } + out = xfdopen(cmd.out, "r"); while (strbuf_getline_lf(&line, out) != EOF) { if (line.len != the_hash_algo->hexsz) @@ -507,6 +686,25 @@ int cmd_repack(int argc, const char **argv, const char *prefix) if (!string_list_has_string(&names, sha1)) remove_redundant_pack(packdir, item->string); } + + if (geometry) { + struct strbuf buf = STRBUF_INIT; + + uint32_t i; + for (i = 0; i < geometry->split; i++) { + struct packed_git *p = geometry->pack[i]; + if (string_list_has_string(&names, + hash_to_hex(p->hash))) + continue; + + strbuf_reset(&buf); + strbuf_addstr(&buf, pack_basename(p)); + strbuf_strip_suffix(&buf, ".pack"); + + remove_redundant_pack(packdir, buf.buf); + } + strbuf_release(&buf); + } if (!po_args.quiet && isatty(2)) opts |= PRUNE_PACKED_VERBOSE; prune_packed_objects(opts); @@ -528,6 +726,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix) string_list_clear(&names, 0); string_list_clear(&rollback, 0); string_list_clear(&existing_packs, 0); + clear_pack_geometry(geometry); strbuf_release(&line); return 0; diff --git a/builtin/reset.c b/builtin/reset.c index c635b062c3..43e855cb88 100644 --- a/builtin/reset.c +++ b/builtin/reset.c @@ -425,7 +425,7 @@ int cmd_reset(int argc, const char **argv, const char *prefix) dwim_ref(rev, strlen(rev), &dummy, &ref, 0); if (ref && !starts_with(ref, "refs/")) - ref = NULL; + FREE_AND_NULL(ref); err = reset_index(ref, &oid, reset_type, quiet); if (reset_type == KEEP && !err) diff --git a/builtin/stash.c b/builtin/stash.c index 3477e940e3..c56fed3354 100644 --- a/builtin/stash.c +++ b/builtin/stash.c @@ -10,11 +10,13 @@ #include "strvec.h" #include "run-command.h" #include "dir.h" +#include "entry.h" #include "rerere.h" #include "revision.h" #include "log-tree.h" #include "diffcore.h" #include "exec-cmd.h" +#include "entry.h" #define INCLUDE_ALL_FILES 2 diff --git a/builtin/symbolic-ref.c b/builtin/symbolic-ref.c index 80237f0df1..e547a08d6c 100644 --- a/builtin/symbolic-ref.c +++ b/builtin/symbolic-ref.c @@ -24,9 +24,11 @@ static int check_symref(const char *HEAD, int quiet, int shorten, int print) return 1; } if (print) { + char *to_free = NULL; if (shorten) - refname = shorten_unambiguous_ref(refname, 0); + refname = to_free = shorten_unambiguous_ref(refname, 0); puts(refname); + free(to_free); } return 0; } diff --git a/builtin/worktree.c b/builtin/worktree.c index 1cd5c2016e..8771453493 100644 --- a/builtin/worktree.c +++ b/builtin/worktree.c @@ -446,16 +446,18 @@ static void print_preparing_worktree_line(int detach, static const char *dwim_branch(const char *path, const char **new_branch) { int n; + int branch_exists; const char *s = worktree_basename(path, &n); const char *branchname = xstrndup(s, n); struct strbuf ref = STRBUF_INIT; UNLEAK(branchname); - if (!strbuf_check_branch_ref(&ref, branchname) && - ref_exists(ref.buf)) { - strbuf_release(&ref); + + branch_exists = !strbuf_check_branch_ref(&ref, branchname) && + ref_exists(ref.buf); + strbuf_release(&ref); + if (branch_exists) return branchname; - } *new_branch = branchname; if (guess_remote) { @@ -803,7 +803,7 @@ static inline int index_pos_to_insert_pos(uintmax_t pos) #define ADD_CACHE_OK_TO_ADD 1 /* Ok to add */ #define ADD_CACHE_OK_TO_REPLACE 2 /* Ok to replace file/directory */ #define ADD_CACHE_SKIP_DFCHECK 4 /* Ok to skip DF 
conflict checks */ -#define ADD_CACHE_JUST_APPEND 8 /* Append only; tree.c::read_tree() */ +#define ADD_CACHE_JUST_APPEND 8 /* Append only */ #define ADD_CACHE_NEW_ONLY 16 /* Do not replace existing ones */ #define ADD_CACHE_KEEP_CACHE_TREE 32 /* Do not invalidate cache-tree */ #define ADD_CACHE_RENORMALIZE 64 /* Pass along HASH_RENORMALIZE */ @@ -1621,30 +1621,6 @@ const char *show_ident_date(const struct ident_split *id, */ int ident_cmp(const struct ident_split *, const struct ident_split *); -struct checkout { - struct index_state *istate; - const char *base_dir; - int base_dir_len; - struct delayed_checkout *delayed_checkout; - struct checkout_metadata meta; - unsigned force:1, - quiet:1, - not_new:1, - clone:1, - refresh_cache:1; -}; -#define CHECKOUT_INIT { NULL, "" } - -#define TEMPORARY_FILENAME_LENGTH 25 -int checkout_entry(struct cache_entry *ce, const struct checkout *state, char *topath, int *nr_checkouts); -void enable_delayed_checkout(struct checkout *state); -int finish_delayed_checkout(struct checkout *state, int *nr_checkouts); -/* - * Unlink the last component and schedule the leading directories for - * removal, such that empty directories get removed. - */ -void unlink_entry(const struct cache_entry *ce); - struct cache_def { struct strbuf path; int flags; @@ -1659,7 +1635,7 @@ static inline void cache_def_clear(struct cache_def *cache) int has_symlink_leading_path(const char *name, int len); int threaded_has_symlink_leading_path(struct cache_def *, const char *, int); -int check_leading_path(const char *name, int len); +int check_leading_path(const char *name, int len, int warn_on_lstat_err); int has_dirs_only_path(const char *name, int len, int prefix_len); void invalidate_lstat_cache(void); void schedule_dir_for_removal(const char *name, int len); @@ -535,6 +535,20 @@ int find_commit_subject(const char *commit_buffer, const char **subject) return eol - p; } +size_t commit_subject_length(const char *body) +{ + const char *p = body; + while (*p) { + const char *next = skip_blank_lines(p); + if (next != p) + break; + p = strchrnul(p, '\n'); + if (*p) + p++; + } + return p - body; +} + struct commit_list *commit_list_insert(struct commit *item, struct commit_list **list_p) { struct commit_list *new_list = xmalloc(sizeof(struct commit_list)); @@ -167,6 +167,9 @@ const void *detach_commit_buffer(struct commit *, unsigned long *sizep); /* Find beginning and length of commit subject. */ int find_commit_subject(const char *commit_buffer, const char **subject); +/* Return length of the commit subject from commit log message. 
*/ +size_t commit_subject_length(const char *body); + struct commit_list *commit_list_insert(struct commit *item, struct commit_list **list); int commit_list_contains(struct commit *item, diff --git a/compat/simple-ipc/ipc-shared.c b/compat/simple-ipc/ipc-shared.c new file mode 100644 index 0000000000..1edec81595 --- /dev/null +++ b/compat/simple-ipc/ipc-shared.c @@ -0,0 +1,28 @@ +#include "cache.h" +#include "simple-ipc.h" +#include "strbuf.h" +#include "pkt-line.h" +#include "thread-utils.h" + +#ifdef SUPPORTS_SIMPLE_IPC + +int ipc_server_run(const char *path, const struct ipc_server_opts *opts, + ipc_server_application_cb *application_cb, + void *application_data) +{ + struct ipc_server_data *server_data = NULL; + int ret; + + ret = ipc_server_run_async(&server_data, path, opts, + application_cb, application_data); + if (ret) + return ret; + + ret = ipc_server_await(server_data); + + ipc_server_free(server_data); + + return ret; +} + +#endif /* SUPPORTS_SIMPLE_IPC */ diff --git a/compat/simple-ipc/ipc-unix-socket.c b/compat/simple-ipc/ipc-unix-socket.c new file mode 100644 index 0000000000..38689b278d --- /dev/null +++ b/compat/simple-ipc/ipc-unix-socket.c @@ -0,0 +1,999 @@ +#include "cache.h" +#include "simple-ipc.h" +#include "strbuf.h" +#include "pkt-line.h" +#include "thread-utils.h" +#include "unix-socket.h" +#include "unix-stream-server.h" + +#ifdef NO_UNIX_SOCKETS +#error compat/simple-ipc/ipc-unix-socket.c requires Unix sockets +#endif + +enum ipc_active_state ipc_get_active_state(const char *path) +{ + enum ipc_active_state state = IPC_STATE__OTHER_ERROR; + struct ipc_client_connect_options options + = IPC_CLIENT_CONNECT_OPTIONS_INIT; + struct stat st; + struct ipc_client_connection *connection_test = NULL; + + options.wait_if_busy = 0; + options.wait_if_not_found = 0; + + if (lstat(path, &st) == -1) { + switch (errno) { + case ENOENT: + case ENOTDIR: + return IPC_STATE__NOT_LISTENING; + default: + return IPC_STATE__INVALID_PATH; + } + } + + /* also complain if a plain file is in the way */ + if ((st.st_mode & S_IFMT) != S_IFSOCK) + return IPC_STATE__INVALID_PATH; + + /* + * Just because the filesystem has a S_IFSOCK type inode + * at `path`, doesn't mean it that there is a server listening. + * Ping it to be sure. + */ + state = ipc_client_try_connect(path, &options, &connection_test); + ipc_client_close_connection(connection_test); + + return state; +} + +/* + * Retry frequency when trying to connect to a server. + * + * This value should be short enough that we don't seriously delay our + * caller, but not fast enough that our spinning puts pressure on the + * system. + */ +#define WAIT_STEP_MS (50) + +/* + * Try to connect to the server. If the server is just starting up or + * is very busy, we may not get a connection the first time. 
+ */ +static enum ipc_active_state connect_to_server( + const char *path, + int timeout_ms, + const struct ipc_client_connect_options *options, + int *pfd) +{ + int k; + + *pfd = -1; + + for (k = 0; k < timeout_ms; k += WAIT_STEP_MS) { + int fd = unix_stream_connect(path, options->uds_disallow_chdir); + + if (fd != -1) { + *pfd = fd; + return IPC_STATE__LISTENING; + } + + if (errno == ENOENT) { + if (!options->wait_if_not_found) + return IPC_STATE__PATH_NOT_FOUND; + + goto sleep_and_try_again; + } + + if (errno == ETIMEDOUT) { + if (!options->wait_if_busy) + return IPC_STATE__NOT_LISTENING; + + goto sleep_and_try_again; + } + + if (errno == ECONNREFUSED) { + if (!options->wait_if_busy) + return IPC_STATE__NOT_LISTENING; + + goto sleep_and_try_again; + } + + return IPC_STATE__OTHER_ERROR; + + sleep_and_try_again: + sleep_millisec(WAIT_STEP_MS); + } + + return IPC_STATE__NOT_LISTENING; +} + +/* + * The total amount of time that we are willing to wait when trying to + * connect to a server. + * + * When the server is first started, it might take a little while for + * it to become ready to service requests. Likewise, the server may + * be very (temporarily) busy and not respond to our connections. + * + * We should gracefully and silently handle those conditions and try + * again for a reasonable time period. + * + * The value chosen here should be long enough for the server + * to reliably heal from the above conditions. + */ +#define MY_CONNECTION_TIMEOUT_MS (1000) + +enum ipc_active_state ipc_client_try_connect( + const char *path, + const struct ipc_client_connect_options *options, + struct ipc_client_connection **p_connection) +{ + enum ipc_active_state state = IPC_STATE__OTHER_ERROR; + int fd = -1; + + *p_connection = NULL; + + trace2_region_enter("ipc-client", "try-connect", NULL); + trace2_data_string("ipc-client", NULL, "try-connect/path", path); + + state = connect_to_server(path, MY_CONNECTION_TIMEOUT_MS, + options, &fd); + + trace2_data_intmax("ipc-client", NULL, "try-connect/state", + (intmax_t)state); + trace2_region_leave("ipc-client", "try-connect", NULL); + + if (state == IPC_STATE__LISTENING) { + (*p_connection) = xcalloc(1, sizeof(struct ipc_client_connection)); + (*p_connection)->fd = fd; + } + + return state; +} + +void ipc_client_close_connection(struct ipc_client_connection *connection) +{ + if (!connection) + return; + + if (connection->fd != -1) + close(connection->fd); + + free(connection); +} + +int ipc_client_send_command_to_connection( + struct ipc_client_connection *connection, + const char *message, struct strbuf *answer) +{ + int ret = 0; + + strbuf_setlen(answer, 0); + + trace2_region_enter("ipc-client", "send-command", NULL); + + if (write_packetized_from_buf_no_flush(message, strlen(message), + connection->fd) < 0 || + packet_flush_gently(connection->fd) < 0) { + ret = error(_("could not send IPC command")); + goto done; + } + + if (read_packetized_to_strbuf( + connection->fd, answer, + PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR) < 0) { + ret = error(_("could not read IPC response")); + goto done; + } + +done: + trace2_region_leave("ipc-client", "send-command", NULL); + return ret; +} + +int ipc_client_send_command(const char *path, + const struct ipc_client_connect_options *options, + const char *message, struct strbuf *answer) +{ + int ret = -1; + enum ipc_active_state state; + struct ipc_client_connection *connection = NULL; + + state = ipc_client_try_connect(path, options, &connection); + + if (state != IPC_STATE__LISTENING) + return 
ret; + + ret = ipc_client_send_command_to_connection(connection, message, answer); + + ipc_client_close_connection(connection); + + return ret; +} + +static int set_socket_blocking_flag(int fd, int make_nonblocking) +{ + int flags; + + flags = fcntl(fd, F_GETFL, NULL); + + if (flags < 0) + return -1; + + if (make_nonblocking) + flags |= O_NONBLOCK; + else + flags &= ~O_NONBLOCK; + + return fcntl(fd, F_SETFL, flags); +} + +/* + * Magic numbers used to annotate callback instance data. + * These are used to help guard against accidentally passing the + * wrong instance data across multiple levels of callbacks (which + * is easy to do if there are `void*` arguments). + */ +enum magic { + MAGIC_SERVER_REPLY_DATA, + MAGIC_WORKER_THREAD_DATA, + MAGIC_ACCEPT_THREAD_DATA, + MAGIC_SERVER_DATA, +}; + +struct ipc_server_reply_data { + enum magic magic; + int fd; + struct ipc_worker_thread_data *worker_thread_data; +}; + +struct ipc_worker_thread_data { + enum magic magic; + struct ipc_worker_thread_data *next_thread; + struct ipc_server_data *server_data; + pthread_t pthread_id; +}; + +struct ipc_accept_thread_data { + enum magic magic; + struct ipc_server_data *server_data; + + struct unix_ss_socket *server_socket; + + int fd_send_shutdown; + int fd_wait_shutdown; + pthread_t pthread_id; +}; + +/* + * With unix-sockets, the conceptual "ipc-server" is implemented as a single + * controller "accept-thread" thread and a pool of "worker-thread" threads. + * The former does the usual `accept()` loop and dispatches connections + * to an idle worker thread. The worker threads wait in an idle loop for + * a new connection, communicate with the client and relay data to/from + * the `application_cb` and then wait for another connection from the + * server thread. This avoids the overhead of constantly creating and + * destroying threads. + */ +struct ipc_server_data { + enum magic magic; + ipc_server_application_cb *application_cb; + void *application_data; + struct strbuf buf_path; + + struct ipc_accept_thread_data *accept_thread; + struct ipc_worker_thread_data *worker_thread_list; + + pthread_mutex_t work_available_mutex; + pthread_cond_t work_available_cond; + + /* + * Accepted but not yet processed client connections are kept + * in a circular buffer FIFO. The queue is empty when the + * positions are equal. + */ + int *fifo_fds; + int queue_size; + int back_pos; + int front_pos; + + int shutdown_requested; + int is_stopped; +}; + +/* + * Remove and return the oldest queued connection. + * + * Returns -1 if empty. + */ +static int fifo_dequeue(struct ipc_server_data *server_data) +{ + /* ASSERT holding mutex */ + + int fd; + + if (server_data->back_pos == server_data->front_pos) + return -1; + + fd = server_data->fifo_fds[server_data->front_pos]; + server_data->fifo_fds[server_data->front_pos] = -1; + + server_data->front_pos++; + if (server_data->front_pos == server_data->queue_size) + server_data->front_pos = 0; + + return fd; +} + +/* + * Push a new fd onto the back of the queue. + * + * Drop it and return -1 if queue is already full. + */ +static int fifo_enqueue(struct ipc_server_data *server_data, int fd) +{ + /* ASSERT holding mutex */ + + int next_back_pos; + + next_back_pos = server_data->back_pos + 1; + if (next_back_pos == server_data->queue_size) + next_back_pos = 0; + + if (next_back_pos == server_data->front_pos) { + /* Queue is full. Just drop it. 
*/ + close(fd); + return -1; + } + + server_data->fifo_fds[server_data->back_pos] = fd; + server_data->back_pos = next_back_pos; + + return fd; +} + +/* + * Wait for a connection to be queued to the FIFO and return it. + * + * Returns -1 if someone has already requested a shutdown. + */ +static int worker_thread__wait_for_connection( + struct ipc_worker_thread_data *worker_thread_data) +{ + /* ASSERT NOT holding mutex */ + + struct ipc_server_data *server_data = worker_thread_data->server_data; + int fd = -1; + + pthread_mutex_lock(&server_data->work_available_mutex); + for (;;) { + if (server_data->shutdown_requested) + break; + + fd = fifo_dequeue(server_data); + if (fd >= 0) + break; + + pthread_cond_wait(&server_data->work_available_cond, + &server_data->work_available_mutex); + } + pthread_mutex_unlock(&server_data->work_available_mutex); + + return fd; +} + +/* + * Forward declare our reply callback function so that any compiler + * errors are reported when we actually define the function (in addition + * to any errors reported when we try to pass this callback function as + * a parameter in a function call). The former are easier to understand. + */ +static ipc_server_reply_cb do_io_reply_callback; + +/* + * Relay application's response message to the client process. + * (We do not flush at this point because we allow the caller + * to chunk data to the client thru us.) + */ +static int do_io_reply_callback(struct ipc_server_reply_data *reply_data, + const char *response, size_t response_len) +{ + if (reply_data->magic != MAGIC_SERVER_REPLY_DATA) + BUG("reply_cb called with wrong instance data"); + + return write_packetized_from_buf_no_flush(response, response_len, + reply_data->fd); +} + +/* A randomly chosen value. */ +#define MY_WAIT_POLL_TIMEOUT_MS (10) + +/* + * If the client hangs up without sending any data on the wire, just + * quietly close the socket and ignore this client. + * + * This worker thread is committed to reading the IPC request data + * from the client at the other end of this fd. Wait here for the + * client to actually put something on the wire -- because if the + * client just does a ping (connect and hangup without sending any + * data), our use of the pkt-line read routines will spew an error + * message. + * + * Return -1 if the client hung up. + * Return 0 if data (possibly incomplete) is ready. + */ +static int worker_thread__wait_for_io_start( + struct ipc_worker_thread_data *worker_thread_data, + int fd) +{ + struct ipc_server_data *server_data = worker_thread_data->server_data; + struct pollfd pollfd[1]; + int result; + + for (;;) { + pollfd[0].fd = fd; + pollfd[0].events = POLLIN; + + result = poll(pollfd, 1, MY_WAIT_POLL_TIMEOUT_MS); + if (result < 0) { + if (errno == EINTR) + continue; + goto cleanup; + } + + if (result == 0) { + /* a timeout */ + + int in_shutdown; + + pthread_mutex_lock(&server_data->work_available_mutex); + in_shutdown = server_data->shutdown_requested; + pthread_mutex_unlock(&server_data->work_available_mutex); + + /* + * If a shutdown is already in progress and this + * client has not started talking yet, just drop it. + */ + if (in_shutdown) + goto cleanup; + continue; + } + + if (pollfd[0].revents & POLLHUP) + goto cleanup; + + if (pollfd[0].revents & POLLIN) + return 0; + + goto cleanup; + } + +cleanup: + close(fd); + return -1; +} + +/* + * Receive the request/command from the client and pass it to the + * registered request-callback. 
The request-callback will compose + * a response and call our reply-callback to send it to the client. + */ +static int worker_thread__do_io( + struct ipc_worker_thread_data *worker_thread_data, + int fd) +{ + /* ASSERT NOT holding lock */ + + struct strbuf buf = STRBUF_INIT; + struct ipc_server_reply_data reply_data; + int ret = 0; + + reply_data.magic = MAGIC_SERVER_REPLY_DATA; + reply_data.worker_thread_data = worker_thread_data; + + reply_data.fd = fd; + + ret = read_packetized_to_strbuf( + reply_data.fd, &buf, + PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR); + if (ret >= 0) { + ret = worker_thread_data->server_data->application_cb( + worker_thread_data->server_data->application_data, + buf.buf, do_io_reply_callback, &reply_data); + + packet_flush_gently(reply_data.fd); + } + else { + /* + * The client probably disconnected/shutdown before it + * could send a well-formed message. Ignore it. + */ + } + + strbuf_release(&buf); + close(reply_data.fd); + + return ret; +} + +/* + * Block SIGPIPE on the current thread (so that we get EPIPE from + * write() rather than an actual signal). + * + * Note that using sigchain_push() and _pop() to control SIGPIPE + * around our IO calls is not thread safe: + * [] It uses a global stack of handler frames. + * [] It uses ALLOC_GROW() to resize it. + * [] Finally, according to the `signal(2)` man-page: + * "The effects of `signal()` in a multithreaded process are unspecified." + */ +static void thread_block_sigpipe(sigset_t *old_set) +{ + sigset_t new_set; + + sigemptyset(&new_set); + sigaddset(&new_set, SIGPIPE); + + sigemptyset(old_set); + pthread_sigmask(SIG_BLOCK, &new_set, old_set); +} + +/* + * Thread proc for an IPC worker thread. It handles a series of + * connections from clients. It pulls the next fd from the queue + * processes it, and then waits for the next client. + * + * Block SIGPIPE in this worker thread for the life of the thread. + * This avoids stray (and sometimes delayed) SIGPIPE signals caused + * by client errors and/or when we are under extremely heavy IO load. + * + * This means that the application callback will have SIGPIPE blocked. + * The callback should not change it. + */ +static void *worker_thread_proc(void *_worker_thread_data) +{ + struct ipc_worker_thread_data *worker_thread_data = _worker_thread_data; + struct ipc_server_data *server_data = worker_thread_data->server_data; + sigset_t old_set; + int fd, io; + int ret; + + trace2_thread_start("ipc-worker"); + + thread_block_sigpipe(&old_set); + + for (;;) { + fd = worker_thread__wait_for_connection(worker_thread_data); + if (fd == -1) + break; /* in shutdown */ + + io = worker_thread__wait_for_io_start(worker_thread_data, fd); + if (io == -1) + continue; /* client hung up without sending anything */ + + ret = worker_thread__do_io(worker_thread_data, fd); + + if (ret == SIMPLE_IPC_QUIT) { + trace2_data_string("ipc-worker", NULL, "queue_stop_async", + "application_quit"); + /* + * The application layer is telling the ipc-server + * layer to shutdown. + * + * We DO NOT have a response to send to the client. + * + * Queue an async stop (to stop the other threads) and + * allow this worker thread to exit now (no sense waiting + * for the thread-pool shutdown signal). + * + * Other non-idle worker threads are allowed to finish + * responding to their current clients. + */ + ipc_server_stop_async(server_data); + break; + } + } + + trace2_thread_exit(); + return NULL; +} + +/* A randomly chosen value. 
*/ +#define MY_ACCEPT_POLL_TIMEOUT_MS (60 * 1000) + +/* + * Accept a new client connection on our socket. This uses non-blocking + * IO so that we can also wait for shutdown requests on our socket-pair + * without actually spinning on a fast timeout. + */ +static int accept_thread__wait_for_connection( + struct ipc_accept_thread_data *accept_thread_data) +{ + struct pollfd pollfd[2]; + int result; + + for (;;) { + pollfd[0].fd = accept_thread_data->fd_wait_shutdown; + pollfd[0].events = POLLIN; + + pollfd[1].fd = accept_thread_data->server_socket->fd_socket; + pollfd[1].events = POLLIN; + + result = poll(pollfd, 2, MY_ACCEPT_POLL_TIMEOUT_MS); + if (result < 0) { + if (errno == EINTR) + continue; + return result; + } + + if (result == 0) { + /* a timeout */ + + /* + * If someone deletes or force-creates a new unix + * domain socket at our path, all future clients + * will be routed elsewhere and we silently starve. + * If that happens, just queue a shutdown. + */ + if (unix_ss_was_stolen( + accept_thread_data->server_socket)) { + trace2_data_string("ipc-accept", NULL, + "queue_stop_async", + "socket_stolen"); + ipc_server_stop_async( + accept_thread_data->server_data); + } + continue; + } + + if (pollfd[0].revents & POLLIN) { + /* shutdown message queued to socketpair */ + return -1; + } + + if (pollfd[1].revents & POLLIN) { + /* a connection is available on server_socket */ + + int client_fd = + accept(accept_thread_data->server_socket->fd_socket, + NULL, NULL); + if (client_fd >= 0) + return client_fd; + + /* + * An error here is unlikely -- it probably + * indicates that the connecting process has + * already dropped the connection. + */ + continue; + } + + BUG("unandled poll result errno=%d r[0]=%d r[1]=%d", + errno, pollfd[0].revents, pollfd[1].revents); + } +} + +/* + * Thread proc for the IPC server "accept thread". This waits for + * an incoming socket connection, appends it to the queue of available + * connections, and notifies a worker thread to process it. + * + * Block SIGPIPE in this thread for the life of the thread. This + * avoids any stray SIGPIPE signals when closing pipe fds under + * extremely heavy loads (such as when the fifo queue is full and we + * drop incomming connections). + */ +static void *accept_thread_proc(void *_accept_thread_data) +{ + struct ipc_accept_thread_data *accept_thread_data = _accept_thread_data; + struct ipc_server_data *server_data = accept_thread_data->server_data; + sigset_t old_set; + + trace2_thread_start("ipc-accept"); + + thread_block_sigpipe(&old_set); + + for (;;) { + int client_fd = accept_thread__wait_for_connection( + accept_thread_data); + + pthread_mutex_lock(&server_data->work_available_mutex); + if (server_data->shutdown_requested) { + pthread_mutex_unlock(&server_data->work_available_mutex); + if (client_fd >= 0) + close(client_fd); + break; + } + + if (client_fd < 0) { + /* ignore transient accept() errors */ + } + else { + fifo_enqueue(server_data, client_fd); + pthread_cond_broadcast(&server_data->work_available_cond); + } + pthread_mutex_unlock(&server_data->work_available_mutex); + } + + trace2_thread_exit(); + return NULL; +} + +/* + * We can't predict the connection arrival rate relative to the worker + * processing rate, therefore we allow the "accept-thread" to queue up + * a generous number of connections, since we'd rather have the client + * not unnecessarily timeout if we can avoid it. 
(The assumption is + * that this will be used for FSMonitor and a few second wait on a + * connection is better than having the client timeout and do the full + * computation itself.) + * + * The FIFO queue size is set to a multiple of the worker pool size. + * This value chosen at random. + */ +#define FIFO_SCALE (100) + +/* + * The backlog value for `listen(2)`. This doesn't need to huge, + * rather just large enough for our "accept-thread" to wake up and + * queue incoming connections onto the FIFO without the kernel + * dropping any. + * + * This value chosen at random. + */ +#define LISTEN_BACKLOG (50) + +static int create_listener_socket( + const char *path, + const struct ipc_server_opts *ipc_opts, + struct unix_ss_socket **new_server_socket) +{ + struct unix_ss_socket *server_socket = NULL; + struct unix_stream_listen_opts uslg_opts = UNIX_STREAM_LISTEN_OPTS_INIT; + int ret; + + uslg_opts.listen_backlog_size = LISTEN_BACKLOG; + uslg_opts.disallow_chdir = ipc_opts->uds_disallow_chdir; + + ret = unix_ss_create(path, &uslg_opts, -1, &server_socket); + if (ret) + return ret; + + if (set_socket_blocking_flag(server_socket->fd_socket, 1)) { + int saved_errno = errno; + unix_ss_free(server_socket); + errno = saved_errno; + return -1; + } + + *new_server_socket = server_socket; + + trace2_data_string("ipc-server", NULL, "listen-with-lock", path); + return 0; +} + +static int setup_listener_socket( + const char *path, + const struct ipc_server_opts *ipc_opts, + struct unix_ss_socket **new_server_socket) +{ + int ret, saved_errno; + + trace2_region_enter("ipc-server", "create-listener_socket", NULL); + + ret = create_listener_socket(path, ipc_opts, new_server_socket); + + saved_errno = errno; + trace2_region_leave("ipc-server", "create-listener_socket", NULL); + errno = saved_errno; + + return ret; +} + +/* + * Start IPC server in a pool of background threads. + */ +int ipc_server_run_async(struct ipc_server_data **returned_server_data, + const char *path, const struct ipc_server_opts *opts, + ipc_server_application_cb *application_cb, + void *application_data) +{ + struct unix_ss_socket *server_socket = NULL; + struct ipc_server_data *server_data; + int sv[2]; + int k; + int ret; + int nr_threads = opts->nr_threads; + + *returned_server_data = NULL; + + /* + * Create a socketpair and set sv[1] to non-blocking. This + * will used to send a shutdown message to the accept-thread + * and allows the accept-thread to wait on EITHER a client + * connection or a shutdown request without spinning. 
+ */ + if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0) + return -1; + + if (set_socket_blocking_flag(sv[1], 1)) { + int saved_errno = errno; + close(sv[0]); + close(sv[1]); + errno = saved_errno; + return -1; + } + + ret = setup_listener_socket(path, opts, &server_socket); + if (ret) { + int saved_errno = errno; + close(sv[0]); + close(sv[1]); + errno = saved_errno; + return ret; + } + + server_data = xcalloc(1, sizeof(*server_data)); + server_data->magic = MAGIC_SERVER_DATA; + server_data->application_cb = application_cb; + server_data->application_data = application_data; + strbuf_init(&server_data->buf_path, 0); + strbuf_addstr(&server_data->buf_path, path); + + if (nr_threads < 1) + nr_threads = 1; + + pthread_mutex_init(&server_data->work_available_mutex, NULL); + pthread_cond_init(&server_data->work_available_cond, NULL); + + server_data->queue_size = nr_threads * FIFO_SCALE; + CALLOC_ARRAY(server_data->fifo_fds, server_data->queue_size); + + server_data->accept_thread = + xcalloc(1, sizeof(*server_data->accept_thread)); + server_data->accept_thread->magic = MAGIC_ACCEPT_THREAD_DATA; + server_data->accept_thread->server_data = server_data; + server_data->accept_thread->server_socket = server_socket; + server_data->accept_thread->fd_send_shutdown = sv[0]; + server_data->accept_thread->fd_wait_shutdown = sv[1]; + + if (pthread_create(&server_data->accept_thread->pthread_id, NULL, + accept_thread_proc, server_data->accept_thread)) + die_errno(_("could not start accept_thread '%s'"), path); + + for (k = 0; k < nr_threads; k++) { + struct ipc_worker_thread_data *wtd; + + wtd = xcalloc(1, sizeof(*wtd)); + wtd->magic = MAGIC_WORKER_THREAD_DATA; + wtd->server_data = server_data; + + if (pthread_create(&wtd->pthread_id, NULL, worker_thread_proc, + wtd)) { + if (k == 0) + die(_("could not start worker[0] for '%s'"), + path); + /* + * Limp along with the thread pool that we have. + */ + break; + } + + wtd->next_thread = server_data->worker_thread_list; + server_data->worker_thread_list = wtd; + } + + *returned_server_data = server_data; + return 0; +} + +/* + * Gently tell the IPC server treads to shutdown. + * Can be run on any thread. + */ +int ipc_server_stop_async(struct ipc_server_data *server_data) +{ + /* ASSERT NOT holding mutex */ + + int fd; + + if (!server_data) + return 0; + + trace2_region_enter("ipc-server", "server-stop-async", NULL); + + pthread_mutex_lock(&server_data->work_available_mutex); + + server_data->shutdown_requested = 1; + + /* + * Write a byte to the shutdown socket pair to wake up the + * accept-thread. + */ + if (write(server_data->accept_thread->fd_send_shutdown, "Q", 1) < 0) + error_errno("could not write to fd_send_shutdown"); + + /* + * Drain the queue of existing connections. + */ + while ((fd = fifo_dequeue(server_data)) != -1) + close(fd); + + /* + * Gently tell worker threads to stop processing new connections + * and exit. (This does not abort in-process conversations.) + */ + pthread_cond_broadcast(&server_data->work_available_cond); + + pthread_mutex_unlock(&server_data->work_available_mutex); + + trace2_region_leave("ipc-server", "server-stop-async", NULL); + + return 0; +} + +/* + * Wait for all IPC server threads to stop. 
+ */ +int ipc_server_await(struct ipc_server_data *server_data) +{ + pthread_join(server_data->accept_thread->pthread_id, NULL); + + if (!server_data->shutdown_requested) + BUG("ipc-server: accept-thread stopped for '%s'", + server_data->buf_path.buf); + + while (server_data->worker_thread_list) { + struct ipc_worker_thread_data *wtd = + server_data->worker_thread_list; + + pthread_join(wtd->pthread_id, NULL); + + server_data->worker_thread_list = wtd->next_thread; + free(wtd); + } + + server_data->is_stopped = 1; + + return 0; +} + +void ipc_server_free(struct ipc_server_data *server_data) +{ + struct ipc_accept_thread_data * accept_thread_data; + + if (!server_data) + return; + + if (!server_data->is_stopped) + BUG("cannot free ipc-server while running for '%s'", + server_data->buf_path.buf); + + accept_thread_data = server_data->accept_thread; + if (accept_thread_data) { + unix_ss_free(accept_thread_data->server_socket); + + if (accept_thread_data->fd_send_shutdown != -1) + close(accept_thread_data->fd_send_shutdown); + if (accept_thread_data->fd_wait_shutdown != -1) + close(accept_thread_data->fd_wait_shutdown); + + free(server_data->accept_thread); + } + + while (server_data->worker_thread_list) { + struct ipc_worker_thread_data *wtd = + server_data->worker_thread_list; + + server_data->worker_thread_list = wtd->next_thread; + free(wtd); + } + + pthread_cond_destroy(&server_data->work_available_cond); + pthread_mutex_destroy(&server_data->work_available_mutex); + + strbuf_release(&server_data->buf_path); + + free(server_data->fifo_fds); + free(server_data); +} diff --git a/compat/simple-ipc/ipc-win32.c b/compat/simple-ipc/ipc-win32.c new file mode 100644 index 0000000000..8f89c02037 --- /dev/null +++ b/compat/simple-ipc/ipc-win32.c @@ -0,0 +1,751 @@ +#include "cache.h" +#include "simple-ipc.h" +#include "strbuf.h" +#include "pkt-line.h" +#include "thread-utils.h" + +#ifndef GIT_WINDOWS_NATIVE +#error This file can only be compiled on Windows +#endif + +static int initialize_pipe_name(const char *path, wchar_t *wpath, size_t alloc) +{ + int off = 0; + struct strbuf realpath = STRBUF_INIT; + + if (!strbuf_realpath(&realpath, path, 0)) + return -1; + + off = swprintf(wpath, alloc, L"\\\\.\\pipe\\"); + if (xutftowcs(wpath + off, realpath.buf, alloc - off) < 0) + return -1; + + /* Handle drive prefix */ + if (wpath[off] && wpath[off + 1] == L':') { + wpath[off + 1] = L'_'; + off += 2; + } + + for (; wpath[off]; off++) + if (wpath[off] == L'/') + wpath[off] = L'\\'; + + strbuf_release(&realpath); + return 0; +} + +static enum ipc_active_state get_active_state(wchar_t *pipe_path) +{ + if (WaitNamedPipeW(pipe_path, NMPWAIT_USE_DEFAULT_WAIT)) + return IPC_STATE__LISTENING; + + if (GetLastError() == ERROR_SEM_TIMEOUT) + return IPC_STATE__NOT_LISTENING; + + if (GetLastError() == ERROR_FILE_NOT_FOUND) + return IPC_STATE__PATH_NOT_FOUND; + + return IPC_STATE__OTHER_ERROR; +} + +enum ipc_active_state ipc_get_active_state(const char *path) +{ + wchar_t pipe_path[MAX_PATH]; + + if (initialize_pipe_name(path, pipe_path, ARRAY_SIZE(pipe_path)) < 0) + return IPC_STATE__INVALID_PATH; + + return get_active_state(pipe_path); +} + +#define WAIT_STEP_MS (50) + +static enum ipc_active_state connect_to_server( + const wchar_t *wpath, + DWORD timeout_ms, + const struct ipc_client_connect_options *options, + int *pfd) +{ + DWORD t_start_ms, t_waited_ms; + DWORD step_ms; + HANDLE hPipe = INVALID_HANDLE_VALUE; + DWORD mode = PIPE_READMODE_BYTE; + DWORD gle; + + *pfd = -1; + + for (;;) { + hPipe = 
CreateFileW(wpath, GENERIC_READ | GENERIC_WRITE, + 0, NULL, OPEN_EXISTING, 0, NULL); + if (hPipe != INVALID_HANDLE_VALUE) + break; + + gle = GetLastError(); + + switch (gle) { + case ERROR_FILE_NOT_FOUND: + if (!options->wait_if_not_found) + return IPC_STATE__PATH_NOT_FOUND; + if (!timeout_ms) + return IPC_STATE__PATH_NOT_FOUND; + + step_ms = (timeout_ms < WAIT_STEP_MS) ? + timeout_ms : WAIT_STEP_MS; + sleep_millisec(step_ms); + + timeout_ms -= step_ms; + break; /* try again */ + + case ERROR_PIPE_BUSY: + if (!options->wait_if_busy) + return IPC_STATE__NOT_LISTENING; + if (!timeout_ms) + return IPC_STATE__NOT_LISTENING; + + t_start_ms = (DWORD)(getnanotime() / 1000000); + + if (!WaitNamedPipeW(wpath, timeout_ms)) { + if (GetLastError() == ERROR_SEM_TIMEOUT) + return IPC_STATE__NOT_LISTENING; + + return IPC_STATE__OTHER_ERROR; + } + + /* + * A pipe server instance became available. + * Race other client processes to connect to + * it. + * + * But first decrement our overall timeout so + * that we don't starve if we keep losing the + * race. But also guard against special + * NPMWAIT_ values (0 and -1). + */ + t_waited_ms = (DWORD)(getnanotime() / 1000000) - t_start_ms; + if (t_waited_ms < timeout_ms) + timeout_ms -= t_waited_ms; + else + timeout_ms = 1; + break; /* try again */ + + default: + return IPC_STATE__OTHER_ERROR; + } + } + + if (!SetNamedPipeHandleState(hPipe, &mode, NULL, NULL)) { + CloseHandle(hPipe); + return IPC_STATE__OTHER_ERROR; + } + + *pfd = _open_osfhandle((intptr_t)hPipe, O_RDWR|O_BINARY); + if (*pfd < 0) { + CloseHandle(hPipe); + return IPC_STATE__OTHER_ERROR; + } + + /* fd now owns hPipe */ + + return IPC_STATE__LISTENING; +} + +/* + * The default connection timeout for Windows clients. + * + * This is not currently part of the ipc_ API (nor the config settings) + * because of differences between Windows and other platforms. + * + * This value was chosen at random. 
+ */ +#define WINDOWS_CONNECTION_TIMEOUT_MS (30000) + +enum ipc_active_state ipc_client_try_connect( + const char *path, + const struct ipc_client_connect_options *options, + struct ipc_client_connection **p_connection) +{ + wchar_t wpath[MAX_PATH]; + enum ipc_active_state state = IPC_STATE__OTHER_ERROR; + int fd = -1; + + *p_connection = NULL; + + trace2_region_enter("ipc-client", "try-connect", NULL); + trace2_data_string("ipc-client", NULL, "try-connect/path", path); + + if (initialize_pipe_name(path, wpath, ARRAY_SIZE(wpath)) < 0) + state = IPC_STATE__INVALID_PATH; + else + state = connect_to_server(wpath, WINDOWS_CONNECTION_TIMEOUT_MS, + options, &fd); + + trace2_data_intmax("ipc-client", NULL, "try-connect/state", + (intmax_t)state); + trace2_region_leave("ipc-client", "try-connect", NULL); + + if (state == IPC_STATE__LISTENING) { + (*p_connection) = xcalloc(1, sizeof(struct ipc_client_connection)); + (*p_connection)->fd = fd; + } + + return state; +} + +void ipc_client_close_connection(struct ipc_client_connection *connection) +{ + if (!connection) + return; + + if (connection->fd != -1) + close(connection->fd); + + free(connection); +} + +int ipc_client_send_command_to_connection( + struct ipc_client_connection *connection, + const char *message, struct strbuf *answer) +{ + int ret = 0; + + strbuf_setlen(answer, 0); + + trace2_region_enter("ipc-client", "send-command", NULL); + + if (write_packetized_from_buf_no_flush(message, strlen(message), + connection->fd) < 0 || + packet_flush_gently(connection->fd) < 0) { + ret = error(_("could not send IPC command")); + goto done; + } + + FlushFileBuffers((HANDLE)_get_osfhandle(connection->fd)); + + if (read_packetized_to_strbuf( + connection->fd, answer, + PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR) < 0) { + ret = error(_("could not read IPC response")); + goto done; + } + +done: + trace2_region_leave("ipc-client", "send-command", NULL); + return ret; +} + +int ipc_client_send_command(const char *path, + const struct ipc_client_connect_options *options, + const char *message, struct strbuf *response) +{ + int ret = -1; + enum ipc_active_state state; + struct ipc_client_connection *connection = NULL; + + state = ipc_client_try_connect(path, options, &connection); + + if (state != IPC_STATE__LISTENING) + return ret; + + ret = ipc_client_send_command_to_connection(connection, message, response); + + ipc_client_close_connection(connection); + + return ret; +} + +/* + * Duplicate the given pipe handle and wrap it in a file descriptor so + * that we can use pkt-line on it. + */ +static int dup_fd_from_pipe(const HANDLE pipe) +{ + HANDLE process = GetCurrentProcess(); + HANDLE handle; + int fd; + + if (!DuplicateHandle(process, pipe, process, &handle, 0, FALSE, + DUPLICATE_SAME_ACCESS)) { + errno = err_win_to_posix(GetLastError()); + return -1; + } + + fd = _open_osfhandle((intptr_t)handle, O_RDWR|O_BINARY); + if (fd < 0) { + errno = err_win_to_posix(GetLastError()); + CloseHandle(handle); + return -1; + } + + /* + * `handle` is now owned by `fd` and will be automatically closed + * when the descriptor is closed. + */ + + return fd; +} + +/* + * Magic numbers used to annotate callback instance data. + * These are used to help guard against accidentally passing the + * wrong instance data across multiple levels of callbacks (which + * is easy to do if there are `void*` arguments). 
+ */ +enum magic { + MAGIC_SERVER_REPLY_DATA, + MAGIC_SERVER_THREAD_DATA, + MAGIC_SERVER_DATA, +}; + +struct ipc_server_reply_data { + enum magic magic; + int fd; + struct ipc_server_thread_data *server_thread_data; +}; + +struct ipc_server_thread_data { + enum magic magic; + struct ipc_server_thread_data *next_thread; + struct ipc_server_data *server_data; + pthread_t pthread_id; + HANDLE hPipe; +}; + +/* + * On Windows, the conceptual "ipc-server" is implemented as a pool of + * n idential/peer "server-thread" threads. That is, there is no + * hierarchy of threads; and therefore no controller thread managing + * the pool. Each thread has an independent handle to the named pipe, + * receives incoming connections, processes the client, and re-uses + * the pipe for the next client connection. + * + * Therefore, the "ipc-server" only needs to maintain a list of the + * spawned threads for eventual "join" purposes. + * + * A single "stop-event" is visible to all of the server threads to + * tell them to shutdown (when idle). + */ +struct ipc_server_data { + enum magic magic; + ipc_server_application_cb *application_cb; + void *application_data; + struct strbuf buf_path; + wchar_t wpath[MAX_PATH]; + + HANDLE hEventStopRequested; + struct ipc_server_thread_data *thread_list; + int is_stopped; +}; + +enum connect_result { + CR_CONNECTED = 0, + CR_CONNECT_PENDING, + CR_CONNECT_ERROR, + CR_WAIT_ERROR, + CR_SHUTDOWN, +}; + +static enum connect_result queue_overlapped_connect( + struct ipc_server_thread_data *server_thread_data, + OVERLAPPED *lpo) +{ + if (ConnectNamedPipe(server_thread_data->hPipe, lpo)) + goto failed; + + switch (GetLastError()) { + case ERROR_IO_PENDING: + return CR_CONNECT_PENDING; + + case ERROR_PIPE_CONNECTED: + SetEvent(lpo->hEvent); + return CR_CONNECTED; + + default: + break; + } + +failed: + error(_("ConnectNamedPipe failed for '%s' (%lu)"), + server_thread_data->server_data->buf_path.buf, + GetLastError()); + return CR_CONNECT_ERROR; +} + +/* + * Use Windows Overlapped IO to wait for a connection or for our event + * to be signalled. + */ +static enum connect_result wait_for_connection( + struct ipc_server_thread_data *server_thread_data, + OVERLAPPED *lpo) +{ + enum connect_result r; + HANDLE waitHandles[2]; + DWORD dwWaitResult; + + r = queue_overlapped_connect(server_thread_data, lpo); + if (r != CR_CONNECT_PENDING) + return r; + + waitHandles[0] = server_thread_data->server_data->hEventStopRequested; + waitHandles[1] = lpo->hEvent; + + dwWaitResult = WaitForMultipleObjects(2, waitHandles, FALSE, INFINITE); + switch (dwWaitResult) { + case WAIT_OBJECT_0 + 0: + return CR_SHUTDOWN; + + case WAIT_OBJECT_0 + 1: + ResetEvent(lpo->hEvent); + return CR_CONNECTED; + + default: + return CR_WAIT_ERROR; + } +} + +/* + * Forward declare our reply callback function so that any compiler + * errors are reported when we actually define the function (in addition + * to any errors reported when we try to pass this callback function as + * a parameter in a function call). The former are easier to understand. + */ +static ipc_server_reply_cb do_io_reply_callback; + +/* + * Relay application's response message to the client process. + * (We do not flush at this point because we allow the caller + * to chunk data to the client thru us.) 
+ */ +static int do_io_reply_callback(struct ipc_server_reply_data *reply_data, + const char *response, size_t response_len) +{ + if (reply_data->magic != MAGIC_SERVER_REPLY_DATA) + BUG("reply_cb called with wrong instance data"); + + return write_packetized_from_buf_no_flush(response, response_len, + reply_data->fd); +} + +/* + * Receive the request/command from the client and pass it to the + * registered request-callback. The request-callback will compose + * a response and call our reply-callback to send it to the client. + * + * Simple-IPC only contains one round trip, so we flush and close + * here after the response. + */ +static int do_io(struct ipc_server_thread_data *server_thread_data) +{ + struct strbuf buf = STRBUF_INIT; + struct ipc_server_reply_data reply_data; + int ret = 0; + + reply_data.magic = MAGIC_SERVER_REPLY_DATA; + reply_data.server_thread_data = server_thread_data; + + reply_data.fd = dup_fd_from_pipe(server_thread_data->hPipe); + if (reply_data.fd < 0) + return error(_("could not create fd from pipe for '%s'"), + server_thread_data->server_data->buf_path.buf); + + ret = read_packetized_to_strbuf( + reply_data.fd, &buf, + PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR); + if (ret >= 0) { + ret = server_thread_data->server_data->application_cb( + server_thread_data->server_data->application_data, + buf.buf, do_io_reply_callback, &reply_data); + + packet_flush_gently(reply_data.fd); + + FlushFileBuffers((HANDLE)_get_osfhandle((reply_data.fd))); + } + else { + /* + * The client probably disconnected/shutdown before it + * could send a well-formed message. Ignore it. + */ + } + + strbuf_release(&buf); + close(reply_data.fd); + + return ret; +} + +/* + * Handle IPC request and response with this connected client. And reset + * the pipe to prepare for the next client. + */ +static int use_connection(struct ipc_server_thread_data *server_thread_data) +{ + int ret; + + ret = do_io(server_thread_data); + + FlushFileBuffers(server_thread_data->hPipe); + DisconnectNamedPipe(server_thread_data->hPipe); + + return ret; +} + +/* + * Thread proc for an IPC server worker thread. It handles a series of + * connections from clients. It cleans and reuses the hPipe between each + * client. + */ +static void *server_thread_proc(void *_server_thread_data) +{ + struct ipc_server_thread_data *server_thread_data = _server_thread_data; + HANDLE hEventConnected = INVALID_HANDLE_VALUE; + OVERLAPPED oConnect; + enum connect_result cr; + int ret; + + assert(server_thread_data->hPipe != INVALID_HANDLE_VALUE); + + trace2_thread_start("ipc-server"); + trace2_data_string("ipc-server", NULL, "pipe", + server_thread_data->server_data->buf_path.buf); + + hEventConnected = CreateEventW(NULL, TRUE, FALSE, NULL); + + memset(&oConnect, 0, sizeof(oConnect)); + oConnect.hEvent = hEventConnected; + + for (;;) { + cr = wait_for_connection(server_thread_data, &oConnect); + + switch (cr) { + case CR_SHUTDOWN: + goto finished; + + case CR_CONNECTED: + ret = use_connection(server_thread_data); + if (ret == SIMPLE_IPC_QUIT) { + ipc_server_stop_async( + server_thread_data->server_data); + goto finished; + } + if (ret > 0) { + /* + * Ignore (transient) IO errors with this + * client and reset for the next client. + */ + } + break; + + case CR_CONNECT_PENDING: + /* By construction, this should not happen. */ + BUG("ipc-server[%s]: unexpeced CR_CONNECT_PENDING", + server_thread_data->server_data->buf_path.buf); + + case CR_CONNECT_ERROR: + case CR_WAIT_ERROR: + /* + * Ignore these theoretical errors. 
+ */ + DisconnectNamedPipe(server_thread_data->hPipe); + break; + + default: + BUG("unandled case after wait_for_connection"); + } + } + +finished: + CloseHandle(server_thread_data->hPipe); + CloseHandle(hEventConnected); + + trace2_thread_exit(); + return NULL; +} + +static HANDLE create_new_pipe(wchar_t *wpath, int is_first) +{ + HANDLE hPipe; + DWORD dwOpenMode, dwPipeMode; + LPSECURITY_ATTRIBUTES lpsa = NULL; + + dwOpenMode = PIPE_ACCESS_INBOUND | PIPE_ACCESS_OUTBOUND | + FILE_FLAG_OVERLAPPED; + + dwPipeMode = PIPE_TYPE_MESSAGE | PIPE_READMODE_BYTE | PIPE_WAIT | + PIPE_REJECT_REMOTE_CLIENTS; + + if (is_first) { + dwOpenMode |= FILE_FLAG_FIRST_PIPE_INSTANCE; + + /* + * On Windows, the first server pipe instance gets to + * set the ACL / Security Attributes on the named + * pipe; subsequent instances inherit and cannot + * change them. + * + * TODO Should we allow the application layer to + * specify security attributes, such as `LocalService` + * or `LocalSystem`, when we create the named pipe? + * This question is probably not important when the + * daemon is started by a foreground user process and + * only needs to talk to the current user, but may be + * if the daemon is run via the Control Panel as a + * System Service. + */ + } + + hPipe = CreateNamedPipeW(wpath, dwOpenMode, dwPipeMode, + PIPE_UNLIMITED_INSTANCES, 1024, 1024, 0, lpsa); + + return hPipe; +} + +int ipc_server_run_async(struct ipc_server_data **returned_server_data, + const char *path, const struct ipc_server_opts *opts, + ipc_server_application_cb *application_cb, + void *application_data) +{ + struct ipc_server_data *server_data; + wchar_t wpath[MAX_PATH]; + HANDLE hPipeFirst = INVALID_HANDLE_VALUE; + int k; + int ret = 0; + int nr_threads = opts->nr_threads; + + *returned_server_data = NULL; + + ret = initialize_pipe_name(path, wpath, ARRAY_SIZE(wpath)); + if (ret < 0) { + errno = EINVAL; + return -1; + } + + hPipeFirst = create_new_pipe(wpath, 1); + if (hPipeFirst == INVALID_HANDLE_VALUE) { + errno = EADDRINUSE; + return -2; + } + + server_data = xcalloc(1, sizeof(*server_data)); + server_data->magic = MAGIC_SERVER_DATA; + server_data->application_cb = application_cb; + server_data->application_data = application_data; + server_data->hEventStopRequested = CreateEvent(NULL, TRUE, FALSE, NULL); + strbuf_init(&server_data->buf_path, 0); + strbuf_addstr(&server_data->buf_path, path); + wcscpy(server_data->wpath, wpath); + + if (nr_threads < 1) + nr_threads = 1; + + for (k = 0; k < nr_threads; k++) { + struct ipc_server_thread_data *std; + + std = xcalloc(1, sizeof(*std)); + std->magic = MAGIC_SERVER_THREAD_DATA; + std->server_data = server_data; + std->hPipe = INVALID_HANDLE_VALUE; + + std->hPipe = (k == 0) + ? hPipeFirst + : create_new_pipe(server_data->wpath, 0); + + if (std->hPipe == INVALID_HANDLE_VALUE) { + /* + * If we've reached a pipe instance limit for + * this path, just use fewer threads. + */ + free(std); + break; + } + + if (pthread_create(&std->pthread_id, NULL, + server_thread_proc, std)) { + /* + * Likewise, if we're out of threads, just use + * fewer threads than requested. + * + * However, we just give up if we can't even get + * one thread. This should not happen. 
+ */ + if (k == 0) + die(_("could not start thread[0] for '%s'"), + path); + + CloseHandle(std->hPipe); + free(std); + break; + } + + std->next_thread = server_data->thread_list; + server_data->thread_list = std; + } + + *returned_server_data = server_data; + return 0; +} + +int ipc_server_stop_async(struct ipc_server_data *server_data) +{ + if (!server_data) + return 0; + + /* + * Gently tell all of the ipc_server threads to shutdown. + * This will be seen the next time they are idle (and waiting + * for a connection). + * + * We DO NOT attempt to force them to drop an active connection. + */ + SetEvent(server_data->hEventStopRequested); + return 0; +} + +int ipc_server_await(struct ipc_server_data *server_data) +{ + DWORD dwWaitResult; + + if (!server_data) + return 0; + + dwWaitResult = WaitForSingleObject(server_data->hEventStopRequested, INFINITE); + if (dwWaitResult != WAIT_OBJECT_0) + return error(_("wait for hEvent failed for '%s'"), + server_data->buf_path.buf); + + while (server_data->thread_list) { + struct ipc_server_thread_data *std = server_data->thread_list; + + pthread_join(std->pthread_id, NULL); + + server_data->thread_list = std->next_thread; + free(std); + } + + server_data->is_stopped = 1; + + return 0; +} + +void ipc_server_free(struct ipc_server_data *server_data) +{ + if (!server_data) + return; + + if (!server_data->is_stopped) + BUG("cannot free ipc-server while running for '%s'", + server_data->buf_path.buf); + + strbuf_release(&server_data->buf_path); + + if (server_data->hEventStopRequested != INVALID_HANDLE_VALUE) + CloseHandle(server_data->hEventStopRequested); + + while (server_data->thread_list) { + struct ipc_server_thread_data *std = server_data->thread_list; + + server_data->thread_list = std->next_thread; + free(std); + } + + free(server_data); +} diff --git a/config.mak.uname b/config.mak.uname index d204c20a64..cb443b4e02 100644 --- a/config.mak.uname +++ b/config.mak.uname @@ -424,6 +424,7 @@ ifeq ($(uname_S),Windows) RUNTIME_PREFIX = YesPlease HAVE_WPGMPTR = YesWeDo NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease + USE_WIN32_IPC = YesPlease USE_WIN32_MMAP = YesPlease MMAP_PREVENTS_DELETE = UnfortunatelyYes # USE_NED_ALLOCATOR = YesPlease @@ -600,6 +601,7 @@ ifneq (,$(findstring MINGW,$(uname_S))) RUNTIME_PREFIX = YesPlease HAVE_WPGMPTR = YesWeDo NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease + USE_WIN32_IPC = YesPlease USE_WIN32_MMAP = YesPlease MMAP_PREVENTS_DELETE = UnfortunatelyYes USE_NED_ALLOCATOR = YesPlease diff --git a/contrib/buildsystems/CMakeLists.txt b/contrib/buildsystems/CMakeLists.txt index ac3dbc079a..75ed198a6a 100644 --- a/contrib/buildsystems/CMakeLists.txt +++ b/contrib/buildsystems/CMakeLists.txt @@ -58,6 +58,10 @@ if(WIN32) # In the vcpkg edition, we need this to be able to link to libcurl set(CURL_NO_CURL_CMAKE ON) + + # Copy the necessary vcpkg DLLs (like iconv) to the install dir + set(X_VCPKG_APPLOCAL_DEPS_INSTALL ON) + set(CMAKE_TOOLCHAIN_FILE ${VCPKG_DIR}/scripts/buildsystems/vcpkg.cmake CACHE STRING "Vcpkg toolchain file") endif() find_program(SH_EXE sh PATHS "C:/Program Files/Git/bin") @@ -243,7 +247,13 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Windows") elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux") add_compile_definitions(PROCFS_EXECUTABLE_PATH="/proc/self/exe" HAVE_DEV_TTY ) - list(APPEND compat_SOURCES unix-socket.c) + list(APPEND compat_SOURCES unix-socket.c unix-stream-server.c) +endif() + +if(CMAKE_SYSTEM_NAME STREQUAL "Windows") + list(APPEND compat_SOURCES compat/simple-ipc/ipc-shared.c compat/simple-ipc/ipc-win32.c) +else() + 
list(APPEND compat_SOURCES compat/simple-ipc/ipc-shared.c compat/simple-ipc/ipc-unix-socket.c) endif() set(EXE_EXTENSION ${CMAKE_EXECUTABLE_SUFFIX}) @@ -685,13 +695,17 @@ endif() parse_makefile_for_executables(git_builtin_extra "BUILT_INS") +option(SKIP_DASHED_BUILT_INS "Skip hardlinking the dashed versions of the built-ins") + #Creating hardlinks +if(NOT SKIP_DASHED_BUILT_INS) foreach(s ${git_SOURCES} ${git_builtin_extra}) string(REPLACE "${CMAKE_SOURCE_DIR}/builtin/" "" s ${s}) string(REPLACE ".c" "" s ${s}) file(APPEND ${CMAKE_BINARY_DIR}/CreateLinks.cmake "file(CREATE_LINK git${EXE_EXTENSION} git-${s}${EXE_EXTENSION})\n") list(APPEND git_links ${CMAKE_BINARY_DIR}/git-${s}${EXE_EXTENSION}) endforeach() +endif() if(CURL_FOUND) set(remote_exes @@ -807,15 +821,19 @@ list(TRANSFORM git_shell_scripts PREPEND "${CMAKE_BINARY_DIR}/") list(TRANSFORM git_perl_scripts PREPEND "${CMAKE_BINARY_DIR}/") #install -install(TARGETS git git-shell +foreach(program ${PROGRAMS_BUILT}) +if(program STREQUAL "git" OR program STREQUAL "git-shell") +install(TARGETS ${program} RUNTIME DESTINATION bin) +else() +install(TARGETS ${program} + RUNTIME DESTINATION libexec/git-core) +endif() +endforeach() + install(PROGRAMS ${CMAKE_BINARY_DIR}/git-cvsserver DESTINATION bin) -list(REMOVE_ITEM PROGRAMS_BUILT git git-shell) -install(TARGETS ${PROGRAMS_BUILT} - RUNTIME DESTINATION libexec/git-core) - set(bin_links git-receive-pack git-upload-archive git-upload-pack) @@ -828,12 +846,12 @@ install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/bin/git-shell${EXE_EXTENS foreach(b ${git_links}) string(REPLACE "${CMAKE_BINARY_DIR}" "" b ${b}) - install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/bin/git${EXE_EXTENSION} ${CMAKE_INSTALL_PREFIX}/libexec/git-core/${b}${EXE_EXTENSION})") + install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/bin/git${EXE_EXTENSION} ${CMAKE_INSTALL_PREFIX}/libexec/git-core/${b})") endforeach() foreach(b ${git_http_links}) string(REPLACE "${CMAKE_BINARY_DIR}" "" b ${b}) - install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/libexec/git-core/git-remote-http${EXE_EXTENSION} ${CMAKE_INSTALL_PREFIX}/libexec/git-core/${b}${EXE_EXTENSION})") + install(CODE "file(CREATE_LINK ${CMAKE_INSTALL_PREFIX}/libexec/git-core/git-remote-http${EXE_EXTENSION} ${CMAKE_INSTALL_PREFIX}/libexec/git-core/${b})") endforeach() install(PROGRAMS ${git_shell_scripts} ${git_perl_scripts} ${CMAKE_BINARY_DIR}/git-p4 @@ -24,17 +24,6 @@ #define CONVERT_STAT_BITS_TXT_CRLF 0x2 #define CONVERT_STAT_BITS_BIN 0x4 -enum crlf_action { - CRLF_UNDEFINED, - CRLF_BINARY, - CRLF_TEXT, - CRLF_TEXT_INPUT, - CRLF_TEXT_CRLF, - CRLF_AUTO, - CRLF_AUTO_INPUT, - CRLF_AUTO_CRLF -}; - struct text_stat { /* NUL, CR, LF and CRLF counts */ unsigned nul, lonecr, lonelf, crlf; @@ -172,7 +161,7 @@ static int text_eol_is_crlf(void) return 0; } -static enum eol output_eol(enum crlf_action crlf_action) +static enum eol output_eol(enum convert_crlf_action crlf_action) { switch (crlf_action) { case CRLF_BINARY: @@ -246,7 +235,7 @@ static int has_crlf_in_index(const struct index_state *istate, const char *path) } static int will_convert_lf_to_crlf(struct text_stat *stats, - enum crlf_action crlf_action) + enum convert_crlf_action crlf_action) { if (output_eol(crlf_action) != EOL_CRLF) return 0; @@ -499,7 +488,7 @@ static int encode_to_worktree(const char *path, const char *src, size_t src_len, static int crlf_to_git(const struct index_state *istate, const char *path, const char *src, size_t len, struct strbuf *buf, - enum crlf_action crlf_action, int conv_flags) 
+ enum convert_crlf_action crlf_action, int conv_flags) { struct text_stat stats; char *dst; @@ -585,8 +574,8 @@ static int crlf_to_git(const struct index_state *istate, return 1; } -static int crlf_to_worktree(const char *src, size_t len, - struct strbuf *buf, enum crlf_action crlf_action) +static int crlf_to_worktree(const char *src, size_t len, struct strbuf *buf, + enum convert_crlf_action crlf_action) { char *to_free = NULL; struct text_stat stats; @@ -884,9 +873,13 @@ static int apply_multi_file_filter(const char *path, const char *src, size_t len goto done; if (fd >= 0) - err = write_packetized_from_fd(fd, process->in); + err = write_packetized_from_fd_no_flush(fd, process->in); else - err = write_packetized_from_buf(src, len, process->in); + err = write_packetized_from_buf_no_flush(src, len, process->in); + if (err) + goto done; + + err = packet_flush_gently(process->in); if (err) goto done; @@ -903,7 +896,8 @@ static int apply_multi_file_filter(const char *path, const char *src, size_t len if (err) goto done; - err = read_packetized_to_strbuf(process->out, &nbuf) < 0; + err = read_packetized_to_strbuf(process->out, &nbuf, + PACKET_READ_GENTLE_ON_EOF) < 0; if (err) goto done; @@ -1247,7 +1241,7 @@ static const char *git_path_check_encoding(struct attr_check_item *check) return value; } -static enum crlf_action git_path_check_crlf(struct attr_check_item *check) +static enum convert_crlf_action git_path_check_crlf(struct attr_check_item *check) { const char *value = check->value; @@ -1297,18 +1291,10 @@ static int git_path_check_ident(struct attr_check_item *check) return !!ATTR_TRUE(value); } -struct conv_attrs { - struct convert_driver *drv; - enum crlf_action attr_action; /* What attr says */ - enum crlf_action crlf_action; /* When no attr is set, use core.autocrlf */ - int ident; - const char *working_tree_encoding; /* Supported encoding or default encoding if NULL */ -}; - static struct attr_check *check; -static void convert_attrs(const struct index_state *istate, - struct conv_attrs *ca, const char *path) +void convert_attrs(const struct index_state *istate, + struct conv_attrs *ca, const char *path) { struct attr_check_item *ccheck = NULL; @@ -1465,19 +1451,16 @@ void convert_to_git_filter_fd(const struct index_state *istate, ident_to_git(dst->buf, dst->len, dst, ca.ident); } -static int convert_to_working_tree_internal(const struct index_state *istate, - const char *path, const char *src, - size_t len, struct strbuf *dst, - int normalizing, - const struct checkout_metadata *meta, - struct delayed_checkout *dco) +static int convert_to_working_tree_ca_internal(const struct conv_attrs *ca, + const char *path, const char *src, + size_t len, struct strbuf *dst, + int normalizing, + const struct checkout_metadata *meta, + struct delayed_checkout *dco) { int ret = 0, ret_filter = 0; - struct conv_attrs ca; - convert_attrs(istate, &ca, path); - - ret |= ident_to_worktree(src, len, dst, ca.ident); + ret |= ident_to_worktree(src, len, dst, ca->ident); if (ret) { src = dst->buf; len = dst->len; @@ -1487,49 +1470,56 @@ static int convert_to_working_tree_internal(const struct index_state *istate, * is a smudge or process filter (even if the process filter doesn't * support smudge). The filters might expect CRLFs. 
*/ - if ((ca.drv && (ca.drv->smudge || ca.drv->process)) || !normalizing) { - ret |= crlf_to_worktree(src, len, dst, ca.crlf_action); + if ((ca->drv && (ca->drv->smudge || ca->drv->process)) || !normalizing) { + ret |= crlf_to_worktree(src, len, dst, ca->crlf_action); if (ret) { src = dst->buf; len = dst->len; } } - ret |= encode_to_worktree(path, src, len, dst, ca.working_tree_encoding); + ret |= encode_to_worktree(path, src, len, dst, ca->working_tree_encoding); if (ret) { src = dst->buf; len = dst->len; } ret_filter = apply_filter( - path, src, len, -1, dst, ca.drv, CAP_SMUDGE, meta, dco); - if (!ret_filter && ca.drv && ca.drv->required) - die(_("%s: smudge filter %s failed"), path, ca.drv->name); + path, src, len, -1, dst, ca->drv, CAP_SMUDGE, meta, dco); + if (!ret_filter && ca->drv && ca->drv->required) + die(_("%s: smudge filter %s failed"), path, ca->drv->name); return ret | ret_filter; } -int async_convert_to_working_tree(const struct index_state *istate, - const char *path, const char *src, - size_t len, struct strbuf *dst, - const struct checkout_metadata *meta, - void *dco) +int async_convert_to_working_tree_ca(const struct conv_attrs *ca, + const char *path, const char *src, + size_t len, struct strbuf *dst, + const struct checkout_metadata *meta, + void *dco) { - return convert_to_working_tree_internal(istate, path, src, len, dst, 0, meta, dco); + return convert_to_working_tree_ca_internal(ca, path, src, len, dst, 0, + meta, dco); } -int convert_to_working_tree(const struct index_state *istate, - const char *path, const char *src, - size_t len, struct strbuf *dst, - const struct checkout_metadata *meta) +int convert_to_working_tree_ca(const struct conv_attrs *ca, + const char *path, const char *src, + size_t len, struct strbuf *dst, + const struct checkout_metadata *meta) { - return convert_to_working_tree_internal(istate, path, src, len, dst, 0, meta, NULL); + return convert_to_working_tree_ca_internal(ca, path, src, len, dst, 0, + meta, NULL); } int renormalize_buffer(const struct index_state *istate, const char *path, const char *src, size_t len, struct strbuf *dst) { - int ret = convert_to_working_tree_internal(istate, path, src, len, dst, 1, NULL, NULL); + struct conv_attrs ca; + int ret; + + convert_attrs(istate, &ca, path); + ret = convert_to_working_tree_ca_internal(&ca, path, src, len, dst, 1, + NULL, NULL); if (ret) { src = dst->buf; len = dst->len; @@ -1956,34 +1946,25 @@ static struct stream_filter *ident_filter(const struct object_id *oid) } /* - * Return an appropriately constructed filter for the path, or NULL if + * Return an appropriately constructed filter for the given ca, or NULL if * the contents cannot be filtered without reading the whole thing * in-core. * * Note that you would be crazy to set CRLF, smudge/clean or ident to a * large binary blob you would want us not to slurp into the memory! 
*/ -struct stream_filter *get_stream_filter(const struct index_state *istate, - const char *path, - const struct object_id *oid) +struct stream_filter *get_stream_filter_ca(const struct conv_attrs *ca, + const struct object_id *oid) { - struct conv_attrs ca; struct stream_filter *filter = NULL; - convert_attrs(istate, &ca, path); - if (ca.drv && (ca.drv->process || ca.drv->smudge || ca.drv->clean)) + if (classify_conv_attrs(ca) != CA_CLASS_STREAMABLE) return NULL; - if (ca.working_tree_encoding) - return NULL; - - if (ca.crlf_action == CRLF_AUTO || ca.crlf_action == CRLF_AUTO_CRLF) - return NULL; - - if (ca.ident) + if (ca->ident) filter = ident_filter(oid); - if (output_eol(ca.crlf_action) == EOL_CRLF) + if (output_eol(ca->crlf_action) == EOL_CRLF) filter = cascade_filter(filter, lf_to_crlf_filter()); else filter = cascade_filter(filter, &null_filter_singleton); @@ -1991,6 +1972,15 @@ struct stream_filter *get_stream_filter(const struct index_state *istate, return filter; } +struct stream_filter *get_stream_filter(const struct index_state *istate, + const char *path, + const struct object_id *oid) +{ + struct conv_attrs ca; + convert_attrs(istate, &ca, path); + return get_stream_filter_ca(&ca, oid); +} + void free_stream_filter(struct stream_filter *filter) { filter->vtbl->free(filter); @@ -2024,3 +2014,21 @@ void clone_checkout_metadata(struct checkout_metadata *dst, if (blob) oidcpy(&dst->blob, blob); } + +enum conv_attrs_classification classify_conv_attrs(const struct conv_attrs *ca) +{ + if (ca->drv) { + if (ca->drv->process) + return CA_CLASS_INCORE_PROCESS; + if (ca->drv->smudge || ca->drv->clean) + return CA_CLASS_INCORE_FILTER; + } + + if (ca->working_tree_encoding) + return CA_CLASS_INCORE; + + if (ca->crlf_action == CRLF_AUTO || ca->crlf_action == CRLF_AUTO_CRLF) + return CA_CLASS_INCORE; + + return CA_CLASS_STREAMABLE; +} @@ -63,6 +63,30 @@ struct checkout_metadata { struct object_id blob; }; +enum convert_crlf_action { + CRLF_UNDEFINED, + CRLF_BINARY, + CRLF_TEXT, + CRLF_TEXT_INPUT, + CRLF_TEXT_CRLF, + CRLF_AUTO, + CRLF_AUTO_INPUT, + CRLF_AUTO_CRLF +}; + +struct convert_driver; + +struct conv_attrs { + struct convert_driver *drv; + enum convert_crlf_action attr_action; /* What attr says */ + enum convert_crlf_action crlf_action; /* When no attr is set, use core.autocrlf */ + int ident; + const char *working_tree_encoding; /* Supported encoding or default encoding if NULL */ +}; + +void convert_attrs(const struct index_state *istate, + struct conv_attrs *ca, const char *path); + extern enum eol core_eol; extern char *check_roundtrip_encoding; const char *get_cached_convert_stats_ascii(const struct index_state *istate, @@ -75,15 +99,34 @@ const char *get_convert_attr_ascii(const struct index_state *istate, int convert_to_git(const struct index_state *istate, const char *path, const char *src, size_t len, struct strbuf *dst, int conv_flags); -int convert_to_working_tree(const struct index_state *istate, - const char *path, const char *src, - size_t len, struct strbuf *dst, - const struct checkout_metadata *meta); -int async_convert_to_working_tree(const struct index_state *istate, - const char *path, const char *src, - size_t len, struct strbuf *dst, - const struct checkout_metadata *meta, - void *dco); +int convert_to_working_tree_ca(const struct conv_attrs *ca, + const char *path, const char *src, + size_t len, struct strbuf *dst, + const struct checkout_metadata *meta); +int async_convert_to_working_tree_ca(const struct conv_attrs *ca, + const char *path, const char *src, + 
size_t len, struct strbuf *dst, + const struct checkout_metadata *meta, + void *dco); +static inline int convert_to_working_tree(const struct index_state *istate, + const char *path, const char *src, + size_t len, struct strbuf *dst, + const struct checkout_metadata *meta) +{ + struct conv_attrs ca; + convert_attrs(istate, &ca, path); + return convert_to_working_tree_ca(&ca, path, src, len, dst, meta); +} +static inline int async_convert_to_working_tree(const struct index_state *istate, + const char *path, const char *src, + size_t len, struct strbuf *dst, + const struct checkout_metadata *meta, + void *dco) +{ + struct conv_attrs ca; + convert_attrs(istate, &ca, path); + return async_convert_to_working_tree_ca(&ca, path, src, len, dst, meta, dco); +} int async_query_available_blobs(const char *cmd, struct string_list *available_paths); int renormalize_buffer(const struct index_state *istate, @@ -136,6 +179,8 @@ struct stream_filter; /* opaque */ struct stream_filter *get_stream_filter(const struct index_state *istate, const char *path, const struct object_id *); +struct stream_filter *get_stream_filter_ca(const struct conv_attrs *ca, + const struct object_id *oid); void free_stream_filter(struct stream_filter *); int is_null_stream_filter(struct stream_filter *); @@ -155,4 +200,37 @@ int stream_filter(struct stream_filter *, const char *input, size_t *isize_p, char *output, size_t *osize_p); +enum conv_attrs_classification { + /* + * The blob must be loaded into a buffer before it can be + * smudged. All smudging is done in-proc. + */ + CA_CLASS_INCORE, + + /* + * The blob must be loaded into a buffer, but uses a + * single-file driver filter, such as rot13. + */ + CA_CLASS_INCORE_FILTER, + + /* + * The blob must be loaded into a buffer, but uses a + * long-running driver process, such as LFS. This might or + * might not use delayed operations. (The important thing is + * that there is a single subordinate long-running process + * handling all associated blobs and in case of delayed + * operations, may hold per-blob state.) + */ + CA_CLASS_INCORE_PROCESS, + + /* + * The blob can be streamed and smudged without needing to + * completely read it into a buffer. + */ + CA_CLASS_STREAMABLE, +}; + +enum conv_attrs_classification classify_conv_attrs( + const struct conv_attrs *ca); + #endif /* CONVERT_H */ diff --git a/csum-file.c b/csum-file.c index 0f35fa5ee4..7510950fa3 100644 --- a/csum-file.c +++ b/csum-file.c @@ -89,32 +89,35 @@ int finalize_hashfile(struct hashfile *f, unsigned char *result, unsigned int fl void hashwrite(struct hashfile *f, const void *buf, unsigned int count) { while (count) { - unsigned offset = f->offset; - unsigned left = sizeof(f->buffer) - offset; + unsigned left = sizeof(f->buffer) - f->offset; unsigned nr = count > left ? left : count; - const void *data; if (f->do_crc) f->crc32 = crc32(f->crc32, buf, nr); if (nr == sizeof(f->buffer)) { - /* process full buffer directly without copy */ - data = buf; + /* + * Flush a full batch worth of data directly + * from the input, skipping the memcpy() to + * the hashfile's buffer. In this block, + * f->offset is necessarily zero. + */ + the_hash_algo->update_fn(&f->ctx, buf, nr); + flush(f, buf, nr); } else { - memcpy(f->buffer + offset, buf, nr); - data = f->buffer; + /* + * Copy to the hashfile's buffer, flushing only + * if it became full. 
+ */ + memcpy(f->buffer + f->offset, buf, nr); + f->offset += nr; + left -= nr; + if (!left) + hashflush(f); } count -= nr; - offset += nr; buf = (char *) buf + nr; - left -= nr; - if (!left) { - the_hash_algo->update_fn(&f->ctx, data, offset); - flush(f, data, offset); - offset = 0; - } - f->offset = offset; } } diff --git a/diff-lib.c b/diff-lib.c index b73cc1859a..e5a58c9259 100644 --- a/diff-lib.c +++ b/diff-lib.c @@ -28,9 +28,10 @@ * exists for ce that is a submodule -- it is a submodule that is not * checked out). Return negative for an error. */ -static int check_removed(const struct cache_entry *ce, struct stat *st) +static int check_removed(const struct index_state *istate, const struct cache_entry *ce, struct stat *st) { - if (lstat(ce->name, st) < 0) { + assert(is_fsmonitor_refreshed(istate)); + if (!(ce->ce_flags & CE_FSMONITOR_VALID) && lstat(ce->name, st) < 0) { if (!is_missing_file_error(errno)) return -1; return 1; @@ -136,7 +137,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option) memset(&(dpath->parent[0]), 0, sizeof(struct combine_diff_parent)*5); - changed = check_removed(ce, &st); + changed = check_removed(istate, ce, &st); if (!changed) wt_mode = ce_mode_from_stat(ce, st.st_mode); else { @@ -216,7 +217,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option) } else { struct stat st; - changed = check_removed(ce, &st); + changed = check_removed(istate, ce, &st); if (changed) { if (changed < 0) { perror(ce->name); @@ -278,7 +279,8 @@ static void diff_index_show_file(struct rev_info *revs, oid, oid_valid, ce->name, dirty_submodule); } -static int get_stat_data(const struct cache_entry *ce, +static int get_stat_data(const struct index_state *istate, + const struct cache_entry *ce, const struct object_id **oidp, unsigned int *modep, int cached, int match_missing, @@ -290,7 +292,7 @@ static int get_stat_data(const struct cache_entry *ce, if (!cached && !ce_uptodate(ce)) { int changed; struct stat st; - changed = check_removed(ce, &st); + changed = check_removed(istate, ce, &st); if (changed < 0) return -1; else if (changed) { @@ -321,12 +323,13 @@ static void show_new_file(struct rev_info *revs, const struct object_id *oid; unsigned int mode; unsigned dirty_submodule = 0; + struct index_state *istate = revs->diffopt.repo->index; /* * New file in the index: it might actually be different in * the working tree. 
*/ - if (get_stat_data(new_file, &oid, &mode, cached, match_missing, + if (get_stat_data(istate, new_file, &oid, &mode, cached, match_missing, &dirty_submodule, &revs->diffopt) < 0) return; @@ -342,8 +345,9 @@ static int show_modified(struct rev_info *revs, unsigned int mode, oldmode; const struct object_id *oid; unsigned dirty_submodule = 0; + struct index_state *istate = revs->diffopt.repo->index; - if (get_stat_data(new_entry, &oid, &mode, cached, match_missing, + if (get_stat_data(istate, new_entry, &oid, &mode, cached, match_missing, &dirty_submodule, &revs->diffopt) < 0) { if (report_missing) diff_index_show_file(revs, "-", old_entry, @@ -574,6 +578,7 @@ int run_diff_index(struct rev_info *revs, unsigned int option) struct object_id oid; const char *name; char merge_base_hex[GIT_MAX_HEXSZ + 1]; + struct index_state *istate = revs->diffopt.repo->index; if (revs->pending.nr != 1) BUG("run_diff_index must be passed exactly one tree"); @@ -581,6 +586,8 @@ int run_diff_index(struct rev_info *revs, unsigned int option) trace_performance_enter(); ent = revs->pending.objects; + refresh_fsmonitor(istate); + if (merge_base) { diff_get_merge_base(revs, &oid); name = oid_to_hex_r(merge_base_hex, &oid); @@ -6,6 +6,7 @@ #include "submodule.h" #include "progress.h" #include "fsmonitor.h" +#include "entry.h" static void create_directories(const char *path, int path_len, const struct checkout *state) @@ -83,7 +84,7 @@ static int create_file(const char *path, unsigned int mode) return open(path, O_WRONLY | O_CREAT | O_EXCL, mode); } -static void *read_blob_entry(const struct cache_entry *ce, unsigned long *size) +void *read_blob_entry(const struct cache_entry *ce, unsigned long *size) { enum object_type type; void *blob_data = read_object_file(&ce->oid, &type, size); @@ -108,7 +109,7 @@ static int open_output_fd(char *path, const struct cache_entry *ce, int to_tempf } } -static int fstat_output(int fd, const struct checkout *state, struct stat *st) +int fstat_checkout_output(int fd, const struct checkout *state, struct stat *st) { /* use fstat() only when path == ce->name */ if (fstat_is_reliable() && @@ -131,7 +132,7 @@ static int streaming_write_entry(const struct cache_entry *ce, char *path, return -1; result |= stream_blob_to_fd(fd, &ce->oid, filter, 1); - *fstat_done = fstat_output(fd, state, statbuf); + *fstat_done = fstat_checkout_output(fd, state, statbuf); result |= close(fd); if (result) @@ -250,8 +251,21 @@ int finish_delayed_checkout(struct checkout *state, int *nr_checkouts) return errs; } -static int write_entry(struct cache_entry *ce, - char *path, const struct checkout *state, int to_tempfile) +void update_ce_after_write(const struct checkout *state, struct cache_entry *ce, + struct stat *st) +{ + if (state->refresh_cache) { + assert(state->istate); + fill_stat_cache_info(state->istate, ce, st); + ce->ce_flags |= CE_UPDATE_IN_BASE; + mark_fsmonitor_invalid(state->istate, ce); + state->istate->cache_changed |= CE_ENTRY_CHANGED; + } +} + +/* Note: ca is used (and required) iff the entry refers to a regular file. 
*/ +static int write_entry(struct cache_entry *ce, char *path, struct conv_attrs *ca, + const struct checkout *state, int to_tempfile) { unsigned int ce_mode_s_ifmt = ce->ce_mode & S_IFMT; struct delayed_checkout *dco = state->delayed_checkout; @@ -268,8 +282,7 @@ static int write_entry(struct cache_entry *ce, clone_checkout_metadata(&meta, &state->meta, &ce->oid); if (ce_mode_s_ifmt == S_IFREG) { - struct stream_filter *filter = get_stream_filter(state->istate, ce->name, - &ce->oid); + struct stream_filter *filter = get_stream_filter_ca(ca, &ce->oid); if (filter && !streaming_write_entry(ce, path, filter, state, to_tempfile, @@ -316,14 +329,17 @@ static int write_entry(struct cache_entry *ce, * Convert from git internal format to working tree format */ if (dco && dco->state != CE_NO_DELAY) { - ret = async_convert_to_working_tree(state->istate, ce->name, new_blob, - size, &buf, &meta, dco); + ret = async_convert_to_working_tree_ca(ca, ce->name, + new_blob, size, + &buf, &meta, dco); if (ret && string_list_has_string(&dco->paths, ce->name)) { free(new_blob); goto delayed; } - } else - ret = convert_to_working_tree(state->istate, ce->name, new_blob, size, &buf, &meta); + } else { + ret = convert_to_working_tree_ca(ca, ce->name, new_blob, + size, &buf, &meta); + } if (ret) { free(new_blob); @@ -345,7 +361,7 @@ static int write_entry(struct cache_entry *ce, wrote = write_in_full(fd, new_blob, size); if (!to_tempfile) - fstat_done = fstat_output(fd, state, &st); + fstat_done = fstat_checkout_output(fd, state, &st); close(fd); free(new_blob); if (wrote < 0) @@ -370,15 +386,10 @@ static int write_entry(struct cache_entry *ce, finish: if (state->refresh_cache) { - assert(state->istate); - if (!fstat_done) - if (lstat(ce->name, &st) < 0) - return error_errno("unable to stat just-written file %s", - ce->name); - fill_stat_cache_info(state->istate, ce, &st); - ce->ce_flags |= CE_UPDATE_IN_BASE; - mark_fsmonitor_invalid(state->istate, ce); - state->istate->cache_changed |= CE_ENTRY_CHANGED; + if (!fstat_done && lstat(ce->name, &st) < 0) + return error_errno("unable to stat just-written file %s", + ce->name); + update_ce_after_write(state, ce , &st); } delayed: return 0; @@ -429,19 +440,13 @@ static void mark_colliding_entries(const struct checkout *state, } } -/* - * Write the contents from ce out to the working tree. - * - * When topath[] is not NULL, instead of writing to the working tree - * file named by ce, a temporary file is created by this function and - * its name is returned in topath[], which must be able to hold at - * least TEMPORARY_FILENAME_LENGTH bytes long. 
- */ -int checkout_entry(struct cache_entry *ce, const struct checkout *state, - char *topath, int *nr_checkouts) +int checkout_entry_ca(struct cache_entry *ce, struct conv_attrs *ca, + const struct checkout *state, char *topath, + int *nr_checkouts) { static struct strbuf path = STRBUF_INIT; struct stat st; + struct conv_attrs ca_buf; if (ce->ce_flags & CE_WT_REMOVE) { if (topath) @@ -454,8 +459,13 @@ int checkout_entry(struct cache_entry *ce, const struct checkout *state, return 0; } - if (topath) - return write_entry(ce, topath, state, 1); + if (topath) { + if (S_ISREG(ce->ce_mode) && !ca) { + convert_attrs(state->istate, &ca_buf, ce->name); + ca = &ca_buf; + } + return write_entry(ce, topath, ca, state, 1); + } strbuf_reset(&path); strbuf_add(&path, state->base_dir, state->base_dir_len); @@ -517,9 +527,16 @@ int checkout_entry(struct cache_entry *ce, const struct checkout *state, return 0; create_directories(path.buf, path.len, state); + if (nr_checkouts) (*nr_checkouts)++; - return write_entry(ce, path.buf, state, 0); + + if (S_ISREG(ce->ce_mode) && !ca) { + convert_attrs(state->istate, &ca_buf, ce->name); + ca = &ca_buf; + } + + return write_entry(ce, path.buf, ca, state, 0); } void unlink_entry(const struct cache_entry *ce) @@ -530,7 +547,7 @@ void unlink_entry(const struct cache_entry *ce) submodule_move_head(ce->name, "HEAD", NULL, SUBMODULE_MOVE_HEAD_FORCE); } - if (!check_leading_path(ce->name, ce_namelen(ce))) + if (check_leading_path(ce->name, ce_namelen(ce), 1) >= 0) return; if (remove_or_warn(ce->ce_mode, ce->name)) return; diff --git a/entry.h b/entry.h new file mode 100644 index 0000000000..b8c0e170dc --- /dev/null +++ b/entry.h @@ -0,0 +1,59 @@ +#ifndef ENTRY_H +#define ENTRY_H + +#include "cache.h" +#include "convert.h" + +struct checkout { + struct index_state *istate; + const char *base_dir; + int base_dir_len; + struct delayed_checkout *delayed_checkout; + struct checkout_metadata meta; + unsigned force:1, + quiet:1, + not_new:1, + clone:1, + refresh_cache:1; +}; +#define CHECKOUT_INIT { NULL, "" } + +#define TEMPORARY_FILENAME_LENGTH 25 +/* + * Write the contents from ce out to the working tree. + * + * When topath[] is not NULL, instead of writing to the working tree + * file named by ce, a temporary file is created by this function and + * its name is returned in topath[], which must be able to hold at + * least TEMPORARY_FILENAME_LENGTH bytes long. + * + * With checkout_entry_ca(), callers can optionally pass a preloaded + * conv_attrs struct (to avoid reloading it), when ce refers to a + * regular file. If ca is NULL, the attributes will be loaded + * internally when (and if) needed. + */ +int checkout_entry_ca(struct cache_entry *ce, struct conv_attrs *ca, + const struct checkout *state, char *topath, + int *nr_checkouts); +static inline int checkout_entry(struct cache_entry *ce, + const struct checkout *state, char *topath, + int *nr_checkouts) +{ + return checkout_entry_ca(ce, NULL, state, topath, nr_checkouts); +} + +void enable_delayed_checkout(struct checkout *state); +int finish_delayed_checkout(struct checkout *state, int *nr_checkouts); + +/* + * Unlink the last component and schedule the leading directories for + * removal, such that empty directories get removed. 
+ */ +void unlink_entry(const struct cache_entry *ce); + +void *read_blob_entry(const struct cache_entry *ce, unsigned long *size); +int fstat_checkout_output(int fd, const struct checkout *state, struct stat *st); +void update_ce_after_write(const struct checkout *state, struct cache_entry *ce, + struct stat *st); + +#endif /* ENTRY_H */ diff --git a/fsmonitor.h b/fsmonitor.h index 7f1794b90b..f20d72631d 100644 --- a/fsmonitor.h +++ b/fsmonitor.h @@ -50,6 +50,17 @@ void refresh_fsmonitor(struct index_state *istate); int fsmonitor_is_trivial_response(const struct strbuf *query_result); /* + * Check if refresh_fsmonitor has been called at least once. + * refresh_fsmonitor is idempotent. Returns true if fsmonitor is + * not enabled (since the state will be "fresh" w/ CE_FSMONITOR_VALID unset) + * This version is useful for assertions + */ +static inline int is_fsmonitor_refreshed(const struct index_state *istate) +{ + return !core_fsmonitor || istate->fsmonitor_has_run_once; +} + +/* * Set the given cache entries CE_FSMONITOR_VALID bit. This should be * called any time the cache entry has been updated to reflect the * current state of the file on disk. diff --git a/git-send-email.perl b/git-send-email.perl index 1f425c0809..f5bbf1647e 100755 --- a/git-send-email.perl +++ b/git-send-email.perl @@ -1942,7 +1942,7 @@ sub validate_patch { my ($fn, $xfer_encoding) = @_; if ($repo) { - my $validate_hook = catfile(catdir($repo->repo_path(), 'hooks'), + my $validate_hook = catfile($repo->hooks_path(), 'sendemail-validate'); my $hook_error; if (-x $validate_hook) { @@ -1635,23 +1635,33 @@ static int handle_curl_result(struct slot_results *results) if (results->curl_result == CURLE_OK) { credential_approve(&http_auth); - if (proxy_auth.password) - credential_approve(&proxy_auth); + credential_approve(&proxy_auth); + credential_approve(&cert_auth); return HTTP_OK; + } else if (results->curl_result == CURLE_SSL_CERTPROBLEM) { + /* + * We can't tell from here whether it's a bad path, bad + * certificate, bad password, or something else wrong + * with the certificate. So we reject the credential to + * avoid caching or saving a bad password. 
+ */ + credential_reject(&cert_auth); + return HTTP_NOAUTH; } else if (missing_target(results)) return HTTP_MISSING_TARGET; else if (results->http_code == 401) { +#ifdef LIBCURL_CAN_HANDLE_AUTH_ANY + http_auth_methods &= ~CURLAUTH_GSSNEGOTIATE; + if (results->auth_avail) { + http_auth_methods &= results->auth_avail; + http_auth_methods_restricted = 1; + return HTTP_REAUTH; + } +#endif if (http_auth.username && http_auth.password) { credential_reject(&http_auth); return HTTP_NOAUTH; } else { -#ifdef LIBCURL_CAN_HANDLE_AUTH_ANY - http_auth_methods &= ~CURLAUTH_GSSNEGOTIATE; - if (results->auth_avail) { - http_auth_methods &= results->auth_avail; - http_auth_methods_restricted = 1; - } -#endif return HTTP_REAUTH; } } else { diff --git a/log-tree.c b/log-tree.c index 4531cebfab..f3178a66a9 100644 --- a/log-tree.c +++ b/log-tree.c @@ -369,8 +369,14 @@ void fmt_output_subject(struct strbuf *filename, int start_len = filename->len; int max_len = start_len + info->patch_name_max - (strlen(suffix) + 1); - if (0 < info->reroll_count) - strbuf_addf(filename, "v%d-", info->reroll_count); + if (info->reroll_count) { + struct strbuf temp = STRBUF_INIT; + + strbuf_addf(&temp, "v%s", info->reroll_count); + format_sanitized_subject(filename, temp.buf, temp.len); + strbuf_addstr(filename, "-"); + strbuf_release(&temp); + } strbuf_addf(filename, "%04d-%s", nr, subject); if (max_len < filename->len) diff --git a/merge-recursive.c b/merge-recursive.c index b69e694d98..ed31f9496c 100644 --- a/merge-recursive.c +++ b/merge-recursive.c @@ -453,7 +453,7 @@ static void unpack_trees_finish(struct merge_options *opt) static int save_files_dirs(const struct object_id *oid, struct strbuf *base, const char *path, - unsigned int mode, int stage, void *context) + unsigned int mode, void *context) { struct path_hashmap_entry *entry; int baselen = base->len; @@ -473,8 +473,8 @@ static void get_files_dirs(struct merge_options *opt, struct tree *tree) { struct pathspec match_all; memset(&match_all, 0, sizeof(match_all)); - read_tree_recursive(opt->repo, tree, "", 0, 0, - &match_all, save_files_dirs, opt); + read_tree(opt->repo, tree, + &match_all, save_files_dirs, opt); } static int get_tree_entry_if_blob(struct repository *r, diff --git a/object-store.h b/object-store.h index 541dab0858..ec32c23dcb 100644 --- a/object-store.h +++ b/object-store.h @@ -153,6 +153,11 @@ struct raw_object_store { /* A most-recently-used ordered version of the packed_git list. */ struct list_head packed_git_mru; + struct { + struct packed_git **packs; + unsigned flags; + } kept_pack_cache; + /* * A map of packfiles to packed_git structs for tracking which * packs have been loaded already. 
diff --git a/packfile.c b/packfile.c index ea29f4ba77..6661f3325a 100644 --- a/packfile.c +++ b/packfile.c @@ -2066,12 +2066,79 @@ int find_pack_entry(struct repository *r, const struct object_id *oid, struct pa return 0; } +static void maybe_invalidate_kept_pack_cache(struct repository *r, + unsigned flags) +{ + if (!r->objects->kept_pack_cache.packs) + return; + if (r->objects->kept_pack_cache.flags == flags) + return; + FREE_AND_NULL(r->objects->kept_pack_cache.packs); + r->objects->kept_pack_cache.flags = 0; +} + +static struct packed_git **kept_pack_cache(struct repository *r, unsigned flags) +{ + maybe_invalidate_kept_pack_cache(r, flags); + + if (!r->objects->kept_pack_cache.packs) { + struct packed_git **packs = NULL; + size_t nr = 0, alloc = 0; + struct packed_git *p; + + /* + * We want "all" packs here, because we need to cover ones that + * are used by a midx, as well. We need to look in every one of + * them (instead of the midx itself) to cover duplicates. It's + * possible that an object is found in two packs that the midx + * covers, one kept and one not kept, but the midx returns only + * the non-kept version. + */ + for (p = get_all_packs(r); p; p = p->next) { + if ((p->pack_keep && (flags & ON_DISK_KEEP_PACKS)) || + (p->pack_keep_in_core && (flags & IN_CORE_KEEP_PACKS))) { + ALLOC_GROW(packs, nr + 1, alloc); + packs[nr++] = p; + } + } + ALLOC_GROW(packs, nr + 1, alloc); + packs[nr] = NULL; + + r->objects->kept_pack_cache.packs = packs; + r->objects->kept_pack_cache.flags = flags; + } + + return r->objects->kept_pack_cache.packs; +} + +int find_kept_pack_entry(struct repository *r, + const struct object_id *oid, + unsigned flags, + struct pack_entry *e) +{ + struct packed_git **cache; + + for (cache = kept_pack_cache(r, flags); *cache; cache++) { + struct packed_git *p = *cache; + if (fill_pack_entry(oid, e, p)) + return 1; + } + + return 0; +} + int has_object_pack(const struct object_id *oid) { struct pack_entry e; return find_pack_entry(the_repository, oid, &e); } +int has_object_kept_pack(const struct object_id *oid, unsigned flags) +{ + struct pack_entry e; + return find_kept_pack_entry(the_repository, oid, flags, &e); +} + int has_pack_index(const unsigned char *sha1) { struct stat st; diff --git a/packfile.h b/packfile.h index 4cfec9e8d3..3ae117a8ae 100644 --- a/packfile.h +++ b/packfile.h @@ -162,13 +162,18 @@ int packed_object_info(struct repository *r, void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1); const struct packed_git *has_packed_and_bad(struct repository *r, const unsigned char *sha1); +#define ON_DISK_KEEP_PACKS 1 +#define IN_CORE_KEEP_PACKS 2 + /* * Iff a pack file in the given repository contains the object named by sha1, * return true and store its location to e. */ int find_pack_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e); +int find_kept_pack_entry(struct repository *r, const struct object_id *oid, unsigned flags, struct pack_entry *e); int has_object_pack(const struct object_id *oid); +int has_object_kept_pack(const struct object_id *oid, unsigned flags); int has_pack_index(const unsigned char *sha1); diff --git a/parse-options.c b/parse-options.c index fbea16eaf5..e6f56768ca 100644 --- a/parse-options.c +++ b/parse-options.c @@ -625,6 +625,8 @@ static int show_gitcomp(const struct option *opts, int show_all) * * Right now this is only used to preprocess and substitute * OPTION_ALIAS. + * + * The returned options should be freed using free_preprocessed_options. 
*/ static struct option *preprocess_options(struct parse_opt_ctx_t *ctx, const struct option *options) @@ -678,6 +680,7 @@ static struct option *preprocess_options(struct parse_opt_ctx_t *ctx, newopt[i].short_name = short_name; newopt[i].long_name = long_name; newopt[i].help = strbuf_detach(&help, NULL); + newopt[i].flags |= PARSE_OPT_FROM_ALIAS; break; } @@ -693,6 +696,20 @@ static struct option *preprocess_options(struct parse_opt_ctx_t *ctx, return newopt; } +static void free_preprocessed_options(struct option *options) +{ + int i; + + if (!options) + return; + + for (i = 0; options[i].type != OPTION_END; i++) { + if (options[i].flags & PARSE_OPT_FROM_ALIAS) + free((void *)options[i].help); + } + free(options); +} + static int usage_with_options_internal(struct parse_opt_ctx_t *, const char * const *, const struct option *, int, int); @@ -870,7 +887,7 @@ int parse_options(int argc, const char **argv, const char *prefix, } precompose_argv_prefix(argc, argv, NULL); - free(real_options); + free_preprocessed_options(real_options); free(ctx.alias_groups); return parse_options_end(&ctx); } diff --git a/parse-options.h b/parse-options.h index ff6506a504..a845a9d952 100644 --- a/parse-options.h +++ b/parse-options.h @@ -28,26 +28,27 @@ enum parse_opt_type { }; enum parse_opt_flags { - PARSE_OPT_KEEP_DASHDASH = 1, - PARSE_OPT_STOP_AT_NON_OPTION = 2, - PARSE_OPT_KEEP_ARGV0 = 4, - PARSE_OPT_KEEP_UNKNOWN = 8, - PARSE_OPT_NO_INTERNAL_HELP = 16, - PARSE_OPT_ONE_SHOT = 32 + PARSE_OPT_KEEP_DASHDASH = 1 << 0, + PARSE_OPT_STOP_AT_NON_OPTION = 1 << 1, + PARSE_OPT_KEEP_ARGV0 = 1 << 2, + PARSE_OPT_KEEP_UNKNOWN = 1 << 3, + PARSE_OPT_NO_INTERNAL_HELP = 1 << 4, + PARSE_OPT_ONE_SHOT = 1 << 5, }; enum parse_opt_option_flags { - PARSE_OPT_OPTARG = 1, - PARSE_OPT_NOARG = 2, - PARSE_OPT_NONEG = 4, - PARSE_OPT_HIDDEN = 8, - PARSE_OPT_LASTARG_DEFAULT = 16, - PARSE_OPT_NODASH = 32, - PARSE_OPT_LITERAL_ARGHELP = 64, - PARSE_OPT_SHELL_EVAL = 256, - PARSE_OPT_NOCOMPLETE = 512, - PARSE_OPT_COMP_ARG = 1024, - PARSE_OPT_CMDMODE = 2048 + PARSE_OPT_OPTARG = 1 << 0, + PARSE_OPT_NOARG = 1 << 1, + PARSE_OPT_NONEG = 1 << 2, + PARSE_OPT_HIDDEN = 1 << 3, + PARSE_OPT_LASTARG_DEFAULT = 1 << 4, + PARSE_OPT_NODASH = 1 << 5, + PARSE_OPT_LITERAL_ARGHELP = 1 << 6, + PARSE_OPT_FROM_ALIAS = 1 << 7, + PARSE_OPT_SHELL_EVAL = 1 << 8, + PARSE_OPT_NOCOMPLETE = 1 << 9, + PARSE_OPT_COMP_ARG = 1 << 10, + PARSE_OPT_CMDMODE = 1 << 11, }; enum parse_opt_result { diff --git a/perl/Git.pm b/perl/Git.pm index 02eacef0c2..73ebbf80cc 100644 --- a/perl/Git.pm +++ b/perl/Git.pm @@ -619,6 +619,19 @@ Return path to the git repository. Must be called on a repository instance. sub repo_path { $_[0]->{opts}->{Repository} } +=item hooks_path () + +Return path to the hooks directory. Must be called on a repository instance. + +=cut + +sub hooks_path { + my ($self) = @_; + + my $dir = $self->command_oneline('rev-parse', '--git-path', 'hooks'); + my $abs = abs_path($dir); + return $abs; +} =item wc_path () diff --git a/pkt-line.c b/pkt-line.c index d633005ef7..0194137528 100644 --- a/pkt-line.c +++ b/pkt-line.c @@ -196,17 +196,26 @@ int packet_write_fmt_gently(int fd, const char *fmt, ...) 
static int packet_write_gently(const int fd_out, const char *buf, size_t size) { - static char packet_write_buffer[LARGE_PACKET_MAX]; + char header[4]; size_t packet_size; - if (size > sizeof(packet_write_buffer) - 4) + if (size > LARGE_PACKET_DATA_MAX) return error(_("packet write failed - data exceeds max packet size")); packet_trace(buf, size, 1); packet_size = size + 4; - set_packet_header(packet_write_buffer, packet_size); - memcpy(packet_write_buffer + 4, buf, size); - if (write_in_full(fd_out, packet_write_buffer, packet_size) < 0) + + set_packet_header(header, packet_size); + + /* + * Write the header and the buffer in 2 parts so that we do + * not need to allocate a buffer or rely on a static buffer. + * This also avoids putting a large buffer on the stack which + * might have multi-threading issues. + */ + + if (write_in_full(fd_out, header, 4) < 0 || + write_in_full(fd_out, buf, size) < 0) return error(_("packet write failed")); return 0; } @@ -242,26 +251,27 @@ void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len) packet_trace(data, len, 1); } -int write_packetized_from_fd(int fd_in, int fd_out) +int write_packetized_from_fd_no_flush(int fd_in, int fd_out) { - static char buf[LARGE_PACKET_DATA_MAX]; + char *buf = xmalloc(LARGE_PACKET_DATA_MAX); int err = 0; ssize_t bytes_to_write; while (!err) { - bytes_to_write = xread(fd_in, buf, sizeof(buf)); - if (bytes_to_write < 0) + bytes_to_write = xread(fd_in, buf, LARGE_PACKET_DATA_MAX); + if (bytes_to_write < 0) { + free(buf); return COPY_READ_ERROR; + } if (bytes_to_write == 0) break; err = packet_write_gently(fd_out, buf, bytes_to_write); } - if (!err) - err = packet_flush_gently(fd_out); + free(buf); return err; } -int write_packetized_from_buf(const char *src_in, size_t len, int fd_out) +int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out) { int err = 0; size_t bytes_written = 0; @@ -277,8 +287,6 @@ int write_packetized_from_buf(const char *src_in, size_t len, int fd_out) err = packet_write_gently(fd_out, src_in + bytes_written, bytes_to_write); bytes_written += bytes_to_write; } - if (!err) - err = packet_flush_gently(fd_out); return err; } @@ -298,8 +306,11 @@ static int get_packet_data(int fd, char **src_buf, size_t *src_size, *src_size -= ret; } else { ret = read_in_full(fd, dst, size); - if (ret < 0) + if (ret < 0) { + if (options & PACKET_READ_GENTLE_ON_READ_ERROR) + return error_errno(_("read error")); die_errno(_("read error")); + } } /* And complain if we didn't get enough bytes to satisfy the read. 
*/ @@ -307,6 +318,8 @@ static int get_packet_data(int fd, char **src_buf, size_t *src_size, if (options & PACKET_READ_GENTLE_ON_EOF) return -1; + if (options & PACKET_READ_GENTLE_ON_READ_ERROR) + return error(_("the remote end hung up unexpectedly")); die(_("the remote end hung up unexpectedly")); } @@ -335,6 +348,9 @@ enum packet_read_status packet_read_with_status(int fd, char **src_buffer, len = packet_length(linelen); if (len < 0) { + if (options & PACKET_READ_GENTLE_ON_READ_ERROR) + return error(_("protocol error: bad line length " + "character: %.4s"), linelen); die(_("protocol error: bad line length character: %.4s"), linelen); } else if (!len) { packet_trace("0000", 4, 0); @@ -349,12 +365,19 @@ enum packet_read_status packet_read_with_status(int fd, char **src_buffer, *pktlen = 0; return PACKET_READ_RESPONSE_END; } else if (len < 4) { + if (options & PACKET_READ_GENTLE_ON_READ_ERROR) + return error(_("protocol error: bad line length %d"), + len); die(_("protocol error: bad line length %d"), len); } len -= 4; - if ((unsigned)len >= size) + if ((unsigned)len >= size) { + if (options & PACKET_READ_GENTLE_ON_READ_ERROR) + return error(_("protocol error: bad line length %d"), + len); die(_("protocol error: bad line length %d"), len); + } if (get_packet_data(fd, src_buffer, src_len, buffer, len, options) < 0) { *pktlen = -1; @@ -421,7 +444,7 @@ char *packet_read_line_buf(char **src, size_t *src_len, int *dst_len) return packet_read_line_generic(-1, src, src_len, dst_len); } -ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out) +ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out, int options) { int packet_len; @@ -437,7 +460,7 @@ ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out) * that there is already room for the extra byte. */ sb_out->buf + sb_out->len, LARGE_PACKET_DATA_MAX+1, - PACKET_READ_GENTLE_ON_EOF); + options); if (packet_len <= 0) break; sb_out->len += packet_len; diff --git a/pkt-line.h b/pkt-line.h index 8c90daa59e..5af5f45687 100644 --- a/pkt-line.h +++ b/pkt-line.h @@ -32,8 +32,8 @@ void packet_buf_write(struct strbuf *buf, const char *fmt, ...) __attribute__((f void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len); int packet_flush_gently(int fd); int packet_write_fmt_gently(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3))); -int write_packetized_from_fd(int fd_in, int fd_out); -int write_packetized_from_buf(const char *src_in, size_t len, int fd_out); +int write_packetized_from_fd_no_flush(int fd_in, int fd_out); +int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out); /* * Read a packetized line into the buffer, which must be at least size bytes @@ -68,10 +68,15 @@ int write_packetized_from_buf(const char *src_in, size_t len, int fd_out); * * If options contains PACKET_READ_DIE_ON_ERR_PACKET, it dies when it sees an * ERR packet. + * + * If options contains PACKET_READ_GENTLE_ON_READ_ERROR, we will not die + * on read errors, but instead return -1. However, we may still die on an + * ERR packet (if requested). 
*/ -#define PACKET_READ_GENTLE_ON_EOF (1u<<0) -#define PACKET_READ_CHOMP_NEWLINE (1u<<1) -#define PACKET_READ_DIE_ON_ERR_PACKET (1u<<2) +#define PACKET_READ_GENTLE_ON_EOF (1u<<0) +#define PACKET_READ_CHOMP_NEWLINE (1u<<1) +#define PACKET_READ_DIE_ON_ERR_PACKET (1u<<2) +#define PACKET_READ_GENTLE_ON_READ_ERROR (1u<<3) int packet_read(int fd, char **src_buffer, size_t *src_len, char *buffer, unsigned size, int options); @@ -131,7 +136,7 @@ char *packet_read_line_buf(char **src_buf, size_t *src_len, int *size); /* * Reads a stream of variable sized packets until a flush packet is detected. */ -ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out); +ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out, int options); /* * Receive multiplexed output stream over git native protocol. diff --git a/rebase-interactive.c b/rebase-interactive.c index 762853bc7e..b6cbd16a17 100644 --- a/rebase-interactive.c +++ b/rebase-interactive.c @@ -44,7 +44,10 @@ void append_todo_help(int command_count, "r, reword <commit> = use commit, but edit the commit message\n" "e, edit <commit> = use commit, but stop for amending\n" "s, squash <commit> = use commit, but meld into previous commit\n" -"f, fixup <commit> = like \"squash\", but discard this commit's log message\n" +"f, fixup [-C | -c] <commit> = like \"squash\" but keep only the previous\n" +" commit's log message, unless -C is used, in which case\n" +" keep only this commit's message; -c is same as -C but\n" +" opens the editor\n" "x, exec <command> = run command (the rest of the line) using shell\n" "b, break = stop here (continue rebase later with 'git rebase --continue')\n" "d, drop <commit> = remove commit\n" @@ -53,7 +56,7 @@ void append_todo_help(int command_count, "m, merge [-C <commit> | -c <commit>] <label> [# <oneline>]\n" ". create a merge commit using the original merge commit's\n" ". message (or the oneline, if no original merge commit was\n" -". specified). Use -c <commit> to reword the commit message.\n" +". 
specified); use -c <commit> to reword the commit message\n" "\n" "These lines can be re-ordered; they are executed from top to bottom.\n"); unsigned edit_todo = !(shortrevisions && shortonto); diff --git a/revision.c b/revision.c index 99c859f797..553c0faa9b 100644 --- a/revision.c +++ b/revision.c @@ -2336,6 +2336,16 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg revs->unpacked = 1; } else if (starts_with(arg, "--unpacked=")) { die(_("--unpacked=<packfile> no longer supported")); + } else if (!strcmp(arg, "--no-kept-objects")) { + revs->no_kept_objects = 1; + revs->keep_pack_cache_flags |= IN_CORE_KEEP_PACKS; + revs->keep_pack_cache_flags |= ON_DISK_KEEP_PACKS; + } else if (skip_prefix(arg, "--no-kept-objects=", &optarg)) { + revs->no_kept_objects = 1; + if (!strcmp(optarg, "in-core")) + revs->keep_pack_cache_flags |= IN_CORE_KEEP_PACKS; + if (!strcmp(optarg, "on-disk")) + revs->keep_pack_cache_flags |= ON_DISK_KEEP_PACKS; } else if (!strcmp(arg, "-r")) { revs->diff = 1; revs->diffopt.flags.recursive = 1; @@ -3795,6 +3805,11 @@ enum commit_action get_commit_action(struct rev_info *revs, struct commit *commi return commit_ignore; if (revs->unpacked && has_object_pack(&commit->object.oid)) return commit_ignore; + if (revs->no_kept_objects) { + if (has_object_kept_pack(&commit->object.oid, + revs->keep_pack_cache_flags)) + return commit_ignore; + } if (commit->object.flags & UNINTERESTING) return commit_ignore; if (revs->line_level_traverse && !want_ancestry(revs)) { diff --git a/revision.h b/revision.h index e6be3c845e..a24f72dcd1 100644 --- a/revision.h +++ b/revision.h @@ -148,6 +148,7 @@ struct rev_info { edge_hint_aggressive:1, limited:1, unpacked:1, + no_kept_objects:1, boundary:2, count:1, left_right:1, @@ -235,7 +236,7 @@ struct rev_info { const char *mime_boundary; const char *patch_suffix; int numbered_files; - int reroll_count; + const char *reroll_count; char *message_id; struct ident_split from_ident; struct string_list *ref_message_ids; @@ -317,6 +318,9 @@ struct rev_info { * This is loaded from the commit-graph being used. */ struct bloom_filter_settings *bloom_filter_settings; + + /* misc. flags related to '--no-kept-objects' */ + unsigned keep_pack_cache_flags; }; int ref_excluded(struct string_list *, const char *path); diff --git a/sequencer.c b/sequencer.c index 848204d3dc..b4c63ae207 100644 --- a/sequencer.c +++ b/sequencer.c @@ -1726,13 +1726,183 @@ static int is_pick_or_similar(enum todo_command command) } } +enum todo_item_flags { + TODO_EDIT_MERGE_MSG = (1 << 0), + TODO_REPLACE_FIXUP_MSG = (1 << 1), + TODO_EDIT_FIXUP_MSG = (1 << 2), +}; + +static const char first_commit_msg_str[] = N_("This is the 1st commit message:"); +static const char nth_commit_msg_fmt[] = N_("This is the commit message #%d:"); +static const char skip_first_commit_msg_str[] = N_("The 1st commit message will be skipped:"); +static const char skip_nth_commit_msg_fmt[] = N_("The commit message #%d will be skipped:"); +static const char combined_commit_msg_fmt[] = N_("This is a combination of %d commits."); + +static int is_fixup_flag(enum todo_command command, unsigned flag) +{ + return command == TODO_FIXUP && ((flag & TODO_REPLACE_FIXUP_MSG) || + (flag & TODO_EDIT_FIXUP_MSG)); +} + +/* + * Wrapper around strbuf_add_commented_lines() which avoids double + * commenting commit subjects. 
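With the default comment character, the intent is that a message whose leading lines are already comments is copied through verbatim and only the remainder is commented; roughly, for the helper defined just below (the message text here is an illustrative value, not taken from the patch):

	struct strbuf out = STRBUF_INIT;
	const char *msg = "# fixup! original subject\nextra paragraph\n";

	add_commented_lines(&out, msg, strlen(msg));
	/*
	 * out.buf is now:
	 *   # fixup! original subject
	 *   # extra paragraph
	 * rather than "# # fixup! original subject".
	 */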
+ */ +static void add_commented_lines(struct strbuf *buf, const void *str, size_t len) +{ + const char *s = str; + while (len > 0 && s[0] == comment_line_char) { + size_t count; + const char *n = memchr(s, '\n', len); + if (!n) + count = len; + else + count = n - s + 1; + strbuf_add(buf, s, count); + s += count; + len -= count; + } + strbuf_add_commented_lines(buf, s, len); +} + +/* Does the current fixup chain contain a squash command? */ +static int seen_squash(struct replay_opts *opts) +{ + return starts_with(opts->current_fixups.buf, "squash") || + strstr(opts->current_fixups.buf, "\nsquash"); +} + +static void update_comment_bufs(struct strbuf *buf1, struct strbuf *buf2, int n) +{ + strbuf_setlen(buf1, 2); + strbuf_addf(buf1, _(nth_commit_msg_fmt), n); + strbuf_addch(buf1, '\n'); + strbuf_setlen(buf2, 2); + strbuf_addf(buf2, _(skip_nth_commit_msg_fmt), n); + strbuf_addch(buf2, '\n'); +} + +/* + * Comment out any un-commented commit messages, updating the message comments + * to say they will be skipped but do not comment out the empty lines that + * surround commit messages and their comments. + */ +static void update_squash_message_for_fixup(struct strbuf *msg) +{ + void (*copy_lines)(struct strbuf *, const void *, size_t) = strbuf_add; + struct strbuf buf1 = STRBUF_INIT, buf2 = STRBUF_INIT; + const char *s, *start; + char *orig_msg; + size_t orig_msg_len; + int i = 1; + + strbuf_addf(&buf1, "# %s\n", _(first_commit_msg_str)); + strbuf_addf(&buf2, "# %s\n", _(skip_first_commit_msg_str)); + s = start = orig_msg = strbuf_detach(msg, &orig_msg_len); + while (s) { + const char *next; + size_t off; + if (skip_prefix(s, buf1.buf, &next)) { + /* + * Copy the last message, preserving the blank line + * preceding the current line + */ + off = (s > start + 1 && s[-2] == '\n') ? 1 : 0; + copy_lines(msg, start, s - start - off); + if (off) + strbuf_addch(msg, '\n'); + /* + * The next message needs to be commented out but the + * message header is already commented out so just copy + * it and the blank line that follows it. + */ + strbuf_addbuf(msg, &buf2); + if (*next == '\n') + strbuf_addch(msg, *next++); + start = s = next; + copy_lines = add_commented_lines; + update_comment_bufs(&buf1, &buf2, ++i); + } else if (skip_prefix(s, buf2.buf, &next)) { + off = (s > start + 1 && s[-2] == '\n') ? 1 : 0; + copy_lines(msg, start, s - start - off); + start = s - off; + s = next; + copy_lines = strbuf_add; + update_comment_bufs(&buf1, &buf2, ++i); + } else { + s = strchr(s, '\n'); + if (s) + s++; + } + } + copy_lines(msg, start, orig_msg_len - (start - orig_msg)); + free(orig_msg); + strbuf_release(&buf1); + strbuf_release(&buf2); +} + +static int append_squash_message(struct strbuf *buf, const char *body, + enum todo_command command, struct replay_opts *opts, + unsigned flag) +{ + const char *fixup_msg; + size_t commented_len = 0, fixup_off; + /* + * amend is non-interactive and not normally used with fixup! + * or squash! commits, so only comment out those subjects when + * squashing commit messages. 
+ */ + if (starts_with(body, "amend!") || + ((command == TODO_SQUASH || seen_squash(opts)) && + (starts_with(body, "squash!") || starts_with(body, "fixup!")))) + commented_len = commit_subject_length(body); + + strbuf_addf(buf, "\n%c ", comment_line_char); + strbuf_addf(buf, _(nth_commit_msg_fmt), + ++opts->current_fixup_count + 1); + strbuf_addstr(buf, "\n\n"); + strbuf_add_commented_lines(buf, body, commented_len); + /* buf->buf may be reallocated so store an offset into the buffer */ + fixup_off = buf->len; + strbuf_addstr(buf, body + commented_len); + + /* fixup -C after squash behaves like squash */ + if (is_fixup_flag(command, flag) && !seen_squash(opts)) { + /* + * We're replacing the commit message so we need to + * append the Signed-off-by: trailer if the user + * requested '--signoff'. + */ + if (opts->signoff) + append_signoff(buf, 0, 0); + + if ((command == TODO_FIXUP) && + (flag & TODO_REPLACE_FIXUP_MSG) && + (file_exists(rebase_path_fixup_msg()) || + !file_exists(rebase_path_squash_msg()))) { + fixup_msg = skip_blank_lines(buf->buf + fixup_off); + if (write_message(fixup_msg, strlen(fixup_msg), + rebase_path_fixup_msg(), 0) < 0) + return error(_("cannot write '%s'"), + rebase_path_fixup_msg()); + } else { + unlink(rebase_path_fixup_msg()); + } + } else { + unlink(rebase_path_fixup_msg()); + } + + return 0; +} + static int update_squash_messages(struct repository *r, enum todo_command command, struct commit *commit, - struct replay_opts *opts) + struct replay_opts *opts, + unsigned flag) { struct strbuf buf = STRBUF_INIT; - int res; + int res = 0; const char *message, *body; const char *encoding = get_commit_output_encoding(); @@ -1748,10 +1918,12 @@ static int update_squash_messages(struct repository *r, buf.buf : strchrnul(buf.buf, '\n'); strbuf_addf(&header, "%c ", comment_line_char); - strbuf_addf(&header, _("This is a combination of %d commits."), + strbuf_addf(&header, _(combined_commit_msg_fmt), opts->current_fixup_count + 2); strbuf_splice(&buf, 0, eol - buf.buf, header.buf, header.len); strbuf_release(&header); + if (is_fixup_flag(command, flag) && !seen_squash(opts)) + update_squash_message_for_fixup(&buf); } else { struct object_id head; struct commit *head_commit; @@ -1765,19 +1937,22 @@ static int update_squash_messages(struct repository *r, return error(_("could not read HEAD's commit message")); find_commit_subject(head_message, &body); - if (write_message(body, strlen(body), - rebase_path_fixup_msg(), 0)) { + if (command == TODO_FIXUP && !flag && write_message(body, strlen(body), + rebase_path_fixup_msg(), 0) < 0) { unuse_commit_buffer(head_commit, head_message); - return error(_("cannot write '%s'"), - rebase_path_fixup_msg()); + return error(_("cannot write '%s'"), rebase_path_fixup_msg()); } - strbuf_addf(&buf, "%c ", comment_line_char); - strbuf_addf(&buf, _("This is a combination of %d commits."), 2); + strbuf_addf(&buf, _(combined_commit_msg_fmt), 2); strbuf_addf(&buf, "\n%c ", comment_line_char); - strbuf_addstr(&buf, _("This is the 1st commit message:")); + strbuf_addstr(&buf, is_fixup_flag(command, flag) ? 
+ _(skip_first_commit_msg_str) : + _(first_commit_msg_str)); strbuf_addstr(&buf, "\n\n"); - strbuf_addstr(&buf, body); + if (is_fixup_flag(command, flag)) + strbuf_add_commented_lines(&buf, body, strlen(body)); + else + strbuf_addstr(&buf, body); unuse_commit_buffer(head_commit, head_message); } @@ -1787,16 +1962,11 @@ static int update_squash_messages(struct repository *r, oid_to_hex(&commit->object.oid)); find_commit_subject(message, &body); - if (command == TODO_SQUASH) { - unlink(rebase_path_fixup_msg()); - strbuf_addf(&buf, "\n%c ", comment_line_char); - strbuf_addf(&buf, _("This is the commit message #%d:"), - ++opts->current_fixup_count + 1); - strbuf_addstr(&buf, "\n\n"); - strbuf_addstr(&buf, body); + if (command == TODO_SQUASH || is_fixup_flag(command, flag)) { + res = append_squash_message(&buf, body, command, opts, flag); } else if (command == TODO_FIXUP) { strbuf_addf(&buf, "\n%c ", comment_line_char); - strbuf_addf(&buf, _("The commit message #%d will be skipped:"), + strbuf_addf(&buf, _(skip_nth_commit_msg_fmt), ++opts->current_fixup_count + 1); strbuf_addstr(&buf, "\n\n"); strbuf_add_commented_lines(&buf, body, strlen(body)); @@ -1804,7 +1974,9 @@ static int update_squash_messages(struct repository *r, return error(_("unknown command: %d"), command); unuse_commit_buffer(commit, message); - res = write_message(buf.buf, buf.len, rebase_path_squash_msg(), 0); + if (!res) + res = write_message(buf.buf, buf.len, rebase_path_squash_msg(), + 0); strbuf_release(&buf); if (!res) { @@ -1861,8 +2033,7 @@ static void record_in_rewritten(struct object_id *oid, } static int do_pick_commit(struct repository *r, - enum todo_command command, - struct commit *commit, + struct todo_item *item, struct replay_opts *opts, int final_fixup, int *check_todo) { @@ -1875,6 +2046,8 @@ static int do_pick_commit(struct repository *r, struct commit_message msg = { NULL, NULL, NULL, NULL }; struct strbuf msgbuf = STRBUF_INIT; int res, unborn = 0, reword = 0, allow, drop_commit; + enum todo_command command = item->command; + struct commit *commit = item->commit; if (opts->no_commit) { /* @@ -2004,7 +2177,8 @@ static int do_pick_commit(struct repository *r, if (command == TODO_REWORD) reword = 1; else if (is_fixup(command)) { - if (update_squash_messages(r, command, commit, opts)) + if (update_squash_messages(r, command, commit, + opts, item->flags)) return -1; flags |= AMEND_MSG; if (!final_fixup) @@ -2169,10 +2343,6 @@ static int read_and_refresh_cache(struct repository *r, return 0; } -enum todo_item_flags { - TODO_EDIT_MERGE_MSG = 1 -}; - void todo_list_release(struct todo_list *todo_list) { strbuf_release(&todo_list->buf); @@ -2259,6 +2429,18 @@ static int parse_insn_line(struct repository *r, struct todo_item *item, return 0; } + if (item->command == TODO_FIXUP) { + if (skip_prefix(bol, "-C", &bol) && + (*bol == ' ' || *bol == '\t')) { + bol += strspn(bol, " \t"); + item->flags |= TODO_REPLACE_FIXUP_MSG; + } else if (skip_prefix(bol, "-c", &bol) && + (*bol == ' ' || *bol == '\t')) { + bol += strspn(bol, " \t"); + item->flags |= TODO_EDIT_FIXUP_MSG; + } + } + if (item->command == TODO_MERGE) { if (skip_prefix(bol, "-C", &bol)) bol += strspn(bol, " \t"); @@ -4124,8 +4306,8 @@ static int pick_commits(struct repository *r, setenv(GIT_REFLOG_ACTION, reflog_message(opts, command_to_string(item->command), NULL), 1); - res = do_pick_commit(r, item->command, item->commit, - opts, is_final_fixup(todo_list), + res = do_pick_commit(r, item, opts, + is_final_fixup(todo_list), &check_todo); if (is_rebase_i(opts)) 
setenv(GIT_REFLOG_ACTION, prev_reflog_action, 1); @@ -4587,11 +4769,14 @@ static int single_pick(struct repository *r, struct replay_opts *opts) { int check_todo; + struct todo_item item; + + item.command = opts->action == REPLAY_PICK ? + TODO_PICK : TODO_REVERT; + item.commit = cmit; setenv(GIT_REFLOG_ACTION, action_name(opts), 0); - return do_pick_commit(r, opts->action == REPLAY_PICK ? - TODO_PICK : TODO_REVERT, cmit, opts, 0, - &check_todo); + return do_pick_commit(r, &item, opts, 0, &check_todo); } int sequencer_pick_revisions(struct repository *r, @@ -5262,6 +5447,14 @@ static void todo_list_to_strbuf(struct repository *r, struct todo_list *todo_lis short_commit_name(item->commit) : oid_to_hex(&item->commit->object.oid); + if (item->command == TODO_FIXUP) { + if (item->flags & TODO_EDIT_FIXUP_MSG) + strbuf_addstr(buf, " -c"); + else if (item->flags & TODO_REPLACE_FIXUP_MSG) { + strbuf_addstr(buf, " -C"); + } + } + if (item->command == TODO_MERGE) { if (item->flags & TODO_EDIT_MERGE_MSG) strbuf_addstr(buf, " -c"); @@ -5462,6 +5655,12 @@ static int subject2item_cmp(const void *fndata, define_commit_slab(commit_todo_item, struct todo_item *); +static int skip_fixupish(const char *subject, const char **p) { + return skip_prefix(subject, "fixup! ", p) || + skip_prefix(subject, "amend! ", p) || + skip_prefix(subject, "squash! ", p); +} + /* * Rearrange the todo list that has both "pick commit-id msg" and "pick * commit-id fixup!/squash! msg" in it so that the latter is put immediately @@ -5520,15 +5719,13 @@ int todo_list_rearrange_squash(struct todo_list *todo_list) format_subject(&buf, subject, " "); subject = subjects[i] = strbuf_detach(&buf, &subject_len); unuse_commit_buffer(item->commit, commit_buffer); - if ((skip_prefix(subject, "fixup! ", &p) || - skip_prefix(subject, "squash! ", &p))) { + if (skip_fixupish(subject, &p)) { struct commit *commit2; for (;;) { while (isspace(*p)) p++; - if (!skip_prefix(p, "fixup! ", &p) && - !skip_prefix(p, "squash! ", &p)) + if (!skip_fixupish(p, &p)) break; } @@ -5558,9 +5755,14 @@ int todo_list_rearrange_squash(struct todo_list *todo_list) } if (i2 >= 0) { rearranged = 1; - todo_list->items[i].command = - starts_with(subject, "fixup!") ? - TODO_FIXUP : TODO_SQUASH; + if (starts_with(subject, "fixup!")) { + todo_list->items[i].command = TODO_FIXUP; + } else if (starts_with(subject, "amend!")) { + todo_list->items[i].command = TODO_FIXUP; + todo_list->items[i].flags = TODO_REPLACE_FIXUP_MSG; + } else { + todo_list->items[i].command = TODO_SQUASH; + } if (tail[i2] < 0) { next[i] = next[i2]; next[i2] = i; diff --git a/simple-ipc.h b/simple-ipc.h new file mode 100644 index 0000000000..dc3606e30b --- /dev/null +++ b/simple-ipc.h @@ -0,0 +1,239 @@ +#ifndef GIT_SIMPLE_IPC_H +#define GIT_SIMPLE_IPC_H + +/* + * See Documentation/technical/api-simple-ipc.txt + */ + +#if defined(GIT_WINDOWS_NATIVE) || !defined(NO_UNIX_SOCKETS) +#define SUPPORTS_SIMPLE_IPC +#endif + +#ifdef SUPPORTS_SIMPLE_IPC +#include "pkt-line.h" + +/* + * Simple IPC Client Side API. + */ + +enum ipc_active_state { + /* + * The pipe/socket exists and the daemon is waiting for connections. + */ + IPC_STATE__LISTENING = 0, + + /* + * The pipe/socket exists, but the daemon is not listening. + * Perhaps it is very busy. + * Perhaps the daemon died without deleting the path. + * Perhaps it is shutting down and draining existing clients. + * Perhaps it is dead, but other clients are lingering and + * still holding a reference to the pathname. 
+ */ + IPC_STATE__NOT_LISTENING, + + /* + * The requested pathname is bogus and no amount of retries + * will fix that. + */ + IPC_STATE__INVALID_PATH, + + /* + * The requested pathname is not found. This usually means + * that there is no daemon present. + */ + IPC_STATE__PATH_NOT_FOUND, + + IPC_STATE__OTHER_ERROR, +}; + +struct ipc_client_connect_options { + /* + * Spin under timeout if the server is running but can't + * accept our connection yet. This should always be set + * unless you just want to poke the server and see if it + * is alive. + */ + unsigned int wait_if_busy:1; + + /* + * Spin under timeout if the pipe/socket is not yet present + * on the file system. This is useful if we just started + * the service and need to wait for it to become ready. + */ + unsigned int wait_if_not_found:1; + + /* + * Disallow chdir() when creating a Unix domain socket. + */ + unsigned int uds_disallow_chdir:1; +}; + +#define IPC_CLIENT_CONNECT_OPTIONS_INIT { \ + .wait_if_busy = 0, \ + .wait_if_not_found = 0, \ + .uds_disallow_chdir = 0, \ +} + +/* + * Determine if a server is listening on this named pipe or socket using + * platform-specific logic. This might just probe the filesystem or it + * might make a trivial connection to the server using this pathname. + */ +enum ipc_active_state ipc_get_active_state(const char *path); + +struct ipc_client_connection { + int fd; +}; + +/* + * Try to connect to the daemon on the named pipe or socket. + * + * Returns IPC_STATE__LISTENING and a connection handle. + * + * Otherwise, returns info to help decide whether to retry or to + * spawn/respawn the server. + */ +enum ipc_active_state ipc_client_try_connect( + const char *path, + const struct ipc_client_connect_options *options, + struct ipc_client_connection **p_connection); + +void ipc_client_close_connection(struct ipc_client_connection *connection); + +/* + * Used by the client to synchronously send and receive a message with + * the server on the provided client connection. + * + * Returns 0 when successful. + * + * Calls error() and returns non-zero otherwise. + */ +int ipc_client_send_command_to_connection( + struct ipc_client_connection *connection, + const char *message, struct strbuf *answer); + +/* + * Used by the client to synchronously connect and send and receive a + * message to the server listening at the given path. + * + * Returns 0 when successful. + * + * Calls error() and returns non-zero otherwise. + */ +int ipc_client_send_command(const char *path, + const struct ipc_client_connect_options *options, + const char *message, struct strbuf *answer); + +/* + * Simple IPC Server Side API. + */ + +struct ipc_server_reply_data; + +typedef int (ipc_server_reply_cb)(struct ipc_server_reply_data *, + const char *response, + size_t response_len); + +/* + * Prototype for an application-supplied callback to process incoming + * client IPC messages and compose a reply. The `application_cb` should + * use the provided `reply_cb` and `reply_data` to send an IPC response + * back to the client. The `reply_cb` callback can be called multiple + * times for chunking purposes. A reply message is optional and may be + * omitted if not necessary for the application. + * + * The return value from the application callback is ignored. + * The value `SIMPLE_IPC_QUIT` can be used to shutdown the server. 
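A callback of this shape might answer each request in a single chunk and use SIMPLE_IPC_QUIT to ask for shutdown; a rough sketch, assuming the usual git-internal headers are available (the name echo_cb is made up for illustration and is not part of this API):

static int echo_cb(void *application_data,
		   const char *request,
		   ipc_server_reply_cb *reply_cb,
		   struct ipc_server_reply_data *reply_data)
{
	if (!strcmp(request, "quit"))
		return SIMPLE_IPC_QUIT; /* ask the server pool to shut down */

	/* send the request text straight back as the reply */
	return reply_cb(reply_data, request, strlen(request));
}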
+ */ +typedef int (ipc_server_application_cb)(void *application_data, + const char *request, + ipc_server_reply_cb *reply_cb, + struct ipc_server_reply_data *reply_data); + +#define SIMPLE_IPC_QUIT -2 + +/* + * Opaque instance data to represent an IPC server instance. + */ +struct ipc_server_data; + +/* + * Control parameters for the IPC server instance. + * Use this to hide platform-specific settings. + */ +struct ipc_server_opts +{ + int nr_threads; + + /* + * Disallow chdir() when creating a Unix domain socket. + */ + unsigned int uds_disallow_chdir:1; +}; + +/* + * Start an IPC server instance in one or more background threads + * and return a handle to the pool. + * + * Returns 0 if the asynchronous server pool was started successfully. + * Returns -1 if not. + * Returns -2 if we could not startup because another server is using + * the socket or named pipe. + * + * When a client IPC message is received, the `application_cb` will be + * called (possibly on a random thread) to handle the message and + * optionally compose a reply message. + */ +int ipc_server_run_async(struct ipc_server_data **returned_server_data, + const char *path, const struct ipc_server_opts *opts, + ipc_server_application_cb *application_cb, + void *application_data); + +/* + * Gently signal the IPC server pool to shutdown. No new client + * connections will be accepted, but existing connections will be + * allowed to complete. + */ +int ipc_server_stop_async(struct ipc_server_data *server_data); + +/* + * Block the calling thread until all threads in the IPC server pool + * have completed and been joined. + */ +int ipc_server_await(struct ipc_server_data *server_data); + +/* + * Close and free all resource handles associated with the IPC server + * pool. + */ +void ipc_server_free(struct ipc_server_data *server_data); + +/* + * Run an IPC server instance and block the calling thread of the + * current process. It does not return until the IPC server has + * either shutdown or had an unrecoverable error. + * + * The IPC server handles incoming IPC messages from client processes + * and may use one or more background threads as necessary. + * + * Returns 0 after the server has completed successfully. + * Returns -1 if the server cannot be started. + * Returns -2 if we could not startup because another server is using + * the socket or named pipe. + * + * When a client IPC message is received, the `application_cb` will be + * called (possibly on a random thread) to handle the message and + * optionally compose a reply message. + * + * Note that `ipc_server_run()` is a synchronous wrapper around the + * above asynchronous routines. It effectively hides all of the + * server state and thread details from the caller and presents a + * simple synchronous interface. 
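Putting the pieces together, a minimal synchronous server built on this header could look roughly like the following, mirroring how the test helper drives it (the hypothetical echo_cb from the sketch above is reused; run_echo_server, the path, and the thread count are arbitrary choices for illustration):

#include "simple-ipc.h"

static int run_echo_server(const char *path)
{
	struct ipc_server_opts opts = {
		.nr_threads = 4,
	};

	/* blocks until the server shuts down or fails to start */
	return ipc_server_run(path, &opts, echo_cb, NULL);
}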
+ */ +int ipc_server_run(const char *path, const struct ipc_server_opts *opts, + ipc_server_application_cb *application_cb, + void *application_data); + +#endif /* SUPPORTS_SIMPLE_IPC */ +#endif /* GIT_SIMPLE_IPC_H */ diff --git a/symlinks.c b/symlinks.c index 7dbb6b23d9..5232d02020 100644 --- a/symlinks.c +++ b/symlinks.c @@ -1,6 +1,7 @@ #include "cache.h" -static int threaded_check_leading_path(struct cache_def *cache, const char *name, int len); +static int threaded_check_leading_path(struct cache_def *cache, const char *name, + int len, int warn_on_lstat_err); static int threaded_has_dirs_only_path(struct cache_def *cache, const char *name, int len, int prefix_len); /* @@ -72,7 +73,7 @@ static int lstat_cache_matchlen(struct cache_def *cache, int prefix_len_stat_func) { int match_len, last_slash, last_slash_dir, previous_slash; - int save_flags, ret; + int save_flags, ret, saved_errno = 0; struct stat st; if (cache->track_flags != track_flags || @@ -139,6 +140,7 @@ static int lstat_cache_matchlen(struct cache_def *cache, if (ret) { *ret_flags = FL_LSTATERR; + saved_errno = errno; if (errno == ENOENT) *ret_flags |= FL_NOENT; } else if (S_ISDIR(st.st_mode)) { @@ -180,6 +182,8 @@ static int lstat_cache_matchlen(struct cache_def *cache, } else { reset_lstat_cache(cache); } + if (saved_errno) + errno = saved_errno; return match_len; } @@ -202,57 +206,47 @@ int threaded_has_symlink_leading_path(struct cache_def *cache, const char *name, return lstat_cache(cache, name, len, FL_SYMLINK|FL_DIR, USE_ONLY_LSTAT) & FL_SYMLINK; } -/* - * Return non-zero if path 'name' has a leading symlink component - */ int has_symlink_leading_path(const char *name, int len) { return threaded_has_symlink_leading_path(&default_cache, name, len); } -/* - * Return zero if path 'name' has a leading symlink component or - * if some leading path component does not exists. - * - * Return -1 if leading path exists and is a directory. - * - * Return path length if leading path exists and is neither a - * directory nor a symlink. - */ -int check_leading_path(const char *name, int len) +int check_leading_path(const char *name, int len, int warn_on_lstat_err) { - return threaded_check_leading_path(&default_cache, name, len); + return threaded_check_leading_path(&default_cache, name, len, + warn_on_lstat_err); } /* - * Return zero if path 'name' has a leading symlink component or - * if some leading path component does not exists. + * Return zero if some leading path component of 'name' does not exist. * * Return -1 if leading path exists and is a directory. * - * Return path length if leading path exists and is neither a - * directory nor a symlink. + * Return the length of a leading component if it either exists but it's not a + * directory, or if we were unable to lstat() it. If warn_on_lstat_err is true, + * also emit a warning for this error. 
*/ -static int threaded_check_leading_path(struct cache_def *cache, const char *name, int len) +static int threaded_check_leading_path(struct cache_def *cache, const char *name, + int len, int warn_on_lstat_err) { int flags; int match_len = lstat_cache_matchlen(cache, name, len, &flags, FL_SYMLINK|FL_NOENT|FL_DIR, USE_ONLY_LSTAT); + int saved_errno = errno; + if (flags & FL_NOENT) return 0; else if (flags & FL_DIR) return -1; - else - return match_len; + if (warn_on_lstat_err && (flags & FL_LSTATERR)) { + char *path = xmemdupz(name, match_len); + errno = saved_errno; + warning_errno(_("failed to lstat '%s'"), path); + free(path); + } + return match_len; } -/* - * Return non-zero if all path components of 'name' exists as a - * directory. If prefix_len > 0, we will test with the stat() - * function instead of the lstat() function for a prefix length of - * 'prefix_len', thus we then allow for symlinks in the prefix part as - * long as those points to real existing directories. - */ int has_dirs_only_path(const char *name, int len, int prefix_len) { return threaded_has_dirs_only_path(&default_cache, name, len, prefix_len); diff --git a/t/helper/test-chmtime.c b/t/helper/test-chmtime.c index aa22af48c2..524b55ca49 100644 --- a/t/helper/test-chmtime.c +++ b/t/helper/test-chmtime.c @@ -109,9 +109,9 @@ int cmd__chmtime(int argc, const char **argv) uintmax_t mtime; if (stat(argv[i], &sb) < 0) { - fprintf(stderr, "Failed to stat %s: %s\n", + fprintf(stderr, "Failed to stat %s: %s. Skipping\n", argv[i], strerror(errno)); - return 1; + continue; } #ifdef GIT_WINDOWS_NATIVE diff --git a/t/helper/test-simple-ipc.c b/t/helper/test-simple-ipc.c new file mode 100644 index 0000000000..42040ef81b --- /dev/null +++ b/t/helper/test-simple-ipc.c @@ -0,0 +1,787 @@ +/* + * test-simple-ipc.c: verify that the Inter-Process Communication works. + */ + +#include "test-tool.h" +#include "cache.h" +#include "strbuf.h" +#include "simple-ipc.h" +#include "parse-options.h" +#include "thread-utils.h" +#include "strvec.h" + +#ifndef SUPPORTS_SIMPLE_IPC +int cmd__simple_ipc(int argc, const char **argv) +{ + die("simple IPC not available on this platform"); +} +#else + +/* + * The test daemon defines an "application callback" that supports a + * series of commands (see `test_app_cb()`). + * + * Unknown commands are caught here and we send an error message back + * to the client process. + */ +static int app__unhandled_command(const char *command, + ipc_server_reply_cb *reply_cb, + struct ipc_server_reply_data *reply_data) +{ + struct strbuf buf = STRBUF_INIT; + int ret; + + strbuf_addf(&buf, "unhandled command: %s", command); + ret = reply_cb(reply_data, buf.buf, buf.len); + strbuf_release(&buf); + + return ret; +} + +/* + * Reply with a single very large buffer. This is to ensure that + * long response are properly handled -- whether the chunking occurs + * in the kernel or in the (probably pkt-line) layer. + */ +#define BIG_ROWS (10000) +static int app__big_command(ipc_server_reply_cb *reply_cb, + struct ipc_server_reply_data *reply_data) +{ + struct strbuf buf = STRBUF_INIT; + int row; + int ret; + + for (row = 0; row < BIG_ROWS; row++) + strbuf_addf(&buf, "big: %.75d\n", row); + + ret = reply_cb(reply_data, buf.buf, buf.len); + strbuf_release(&buf); + + return ret; +} + +/* + * Reply with a series of lines. This is to ensure that we can incrementally + * compute the response and chunk it to the client. 
+ */ +#define CHUNK_ROWS (10000) +static int app__chunk_command(ipc_server_reply_cb *reply_cb, + struct ipc_server_reply_data *reply_data) +{ + struct strbuf buf = STRBUF_INIT; + int row; + int ret; + + for (row = 0; row < CHUNK_ROWS; row++) { + strbuf_setlen(&buf, 0); + strbuf_addf(&buf, "big: %.75d\n", row); + ret = reply_cb(reply_data, buf.buf, buf.len); + } + + strbuf_release(&buf); + + return ret; +} + +/* + * Slowly reply with a series of lines. This is to model an expensive to + * compute chunked response (which might happen if this callback is running + * in a thread and is fighting for a lock with other threads). + */ +#define SLOW_ROWS (1000) +#define SLOW_DELAY_MS (10) +static int app__slow_command(ipc_server_reply_cb *reply_cb, + struct ipc_server_reply_data *reply_data) +{ + struct strbuf buf = STRBUF_INIT; + int row; + int ret; + + for (row = 0; row < SLOW_ROWS; row++) { + strbuf_setlen(&buf, 0); + strbuf_addf(&buf, "big: %.75d\n", row); + ret = reply_cb(reply_data, buf.buf, buf.len); + sleep_millisec(SLOW_DELAY_MS); + } + + strbuf_release(&buf); + + return ret; +} + +/* + * The client sent a command followed by a (possibly very) large buffer. + */ +static int app__sendbytes_command(const char *received, + ipc_server_reply_cb *reply_cb, + struct ipc_server_reply_data *reply_data) +{ + struct strbuf buf_resp = STRBUF_INIT; + const char *p = "?"; + int len_ballast = 0; + int k; + int errs = 0; + int ret; + + if (skip_prefix(received, "sendbytes ", &p)) + len_ballast = strlen(p); + + /* + * Verify that the ballast is n copies of a single letter. + * And that the multi-threaded IO layer didn't cross the streams. + */ + for (k = 1; k < len_ballast; k++) + if (p[k] != p[0]) + errs++; + + if (errs) + strbuf_addf(&buf_resp, "errs:%d\n", errs); + else + strbuf_addf(&buf_resp, "rcvd:%c%08d\n", p[0], len_ballast); + + ret = reply_cb(reply_data, buf_resp.buf, buf_resp.len); + + strbuf_release(&buf_resp); + + return ret; +} + +/* + * An arbitrary fixed address to verify that the application instance + * data is handled properly. + */ +static int my_app_data = 42; + +static ipc_server_application_cb test_app_cb; + +/* + * This is the "application callback" that sits on top of the + * "ipc-server". It completely defines the set of commands supported + * by this application. + */ +static int test_app_cb(void *application_data, + const char *command, + ipc_server_reply_cb *reply_cb, + struct ipc_server_reply_data *reply_data) +{ + /* + * Verify that we received the application-data that we passed + * when we started the ipc-server. (We have several layers of + * callbacks calling callbacks and it's easy to get things mixed + * up (especially when some are "void*").) + */ + if (application_data != (void*)&my_app_data) + BUG("application_cb: application_data pointer wrong"); + + if (!strcmp(command, "quit")) { + /* + * The client sent a "quit" command. This is an async + * request for the server to shutdown. + * + * We DO NOT send the client a response message + * (because we have nothing to say and the other + * server threads have not yet stopped). + * + * Tell the ipc-server layer to start shutting down. + * This includes: stop listening for new connections + * on the socket/pipe and telling all worker threads + * to finish/drain their outgoing responses to other + * clients. + * + * This DOES NOT force an immediate sync shutdown. 
+ */ + return SIMPLE_IPC_QUIT; + } + + if (!strcmp(command, "ping")) { + const char *answer = "pong"; + return reply_cb(reply_data, answer, strlen(answer)); + } + + if (!strcmp(command, "big")) + return app__big_command(reply_cb, reply_data); + + if (!strcmp(command, "chunk")) + return app__chunk_command(reply_cb, reply_data); + + if (!strcmp(command, "slow")) + return app__slow_command(reply_cb, reply_data); + + if (starts_with(command, "sendbytes ")) + return app__sendbytes_command(command, reply_cb, reply_data); + + return app__unhandled_command(command, reply_cb, reply_data); +} + +struct cl_args +{ + const char *subcommand; + const char *path; + const char *token; + + int nr_threads; + int max_wait_sec; + int bytecount; + int batchsize; + + char bytevalue; +}; + +static struct cl_args cl_args = { + .subcommand = NULL, + .path = "ipc-test", + .token = NULL, + + .nr_threads = 5, + .max_wait_sec = 60, + .bytecount = 1024, + .batchsize = 10, + + .bytevalue = 'x', +}; + +/* + * This process will run as a simple-ipc server and listen for IPC commands + * from client processes. + */ +static int daemon__run_server(void) +{ + int ret; + + struct ipc_server_opts opts = { + .nr_threads = cl_args.nr_threads, + }; + + /* + * Synchronously run the ipc-server. We don't need any application + * instance data, so pass an arbitrary pointer (that we'll later + * verify made the round trip). + */ + ret = ipc_server_run(cl_args.path, &opts, test_app_cb, (void*)&my_app_data); + if (ret == -2) + error(_("socket/pipe already in use: '%s'"), cl_args.path); + else if (ret == -1) + error_errno(_("could not start server on: '%s'"), cl_args.path); + + return ret; +} + +#ifndef GIT_WINDOWS_NATIVE +/* + * This is adapted from `daemonize()`. Use `fork()` to directly create and + * run the daemon in a child process. + */ +static int spawn_server(pid_t *pid) +{ + struct ipc_server_opts opts = { + .nr_threads = cl_args.nr_threads, + }; + + *pid = fork(); + + switch (*pid) { + case 0: + if (setsid() == -1) + error_errno(_("setsid failed")); + close(0); + close(1); + close(2); + sanitize_stdfds(); + + return ipc_server_run(cl_args.path, &opts, test_app_cb, + (void*)&my_app_data); + + case -1: + return error_errno(_("could not spawn daemon in the background")); + + default: + return 0; + } +} +#else +/* + * Conceptually like `daemonize()` but different because Windows does not + * have `fork(2)`. Spawn a normal Windows child process but without the + * limitations of `start_command()` and `finish_command()`. + */ +static int spawn_server(pid_t *pid) +{ + char test_tool_exe[MAX_PATH]; + struct strvec args = STRVEC_INIT; + int in, out; + + GetModuleFileNameA(NULL, test_tool_exe, MAX_PATH); + + in = open("/dev/null", O_RDONLY); + out = open("/dev/null", O_WRONLY); + + strvec_push(&args, test_tool_exe); + strvec_push(&args, "simple-ipc"); + strvec_push(&args, "run-daemon"); + strvec_pushf(&args, "--name=%s", cl_args.path); + strvec_pushf(&args, "--threads=%d", cl_args.nr_threads); + + *pid = mingw_spawnvpe(args.v[0], args.v, NULL, NULL, in, out, out); + close(in); + close(out); + + strvec_clear(&args); + + if (*pid < 0) + return error(_("could not spawn daemon in the background")); + + return 0; +} +#endif + +/* + * This is adapted from `wait_or_whine()`. Watch the child process and + * let it get started and begin listening for requests on the socket + * before reporting our success. 
+ */ +static int wait_for_server_startup(pid_t pid_child) +{ + int status; + pid_t pid_seen; + enum ipc_active_state s; + time_t time_limit, now; + + time(&time_limit); + time_limit += cl_args.max_wait_sec; + + for (;;) { + pid_seen = waitpid(pid_child, &status, WNOHANG); + + if (pid_seen == -1) + return error_errno(_("waitpid failed")); + + else if (pid_seen == 0) { + /* + * The child is still running (this should be + * the normal case). Try to connect to it on + * the socket and see if it is ready for + * business. + * + * If there is another daemon already running, + * our child will fail to start (possibly + * after a timeout on the lock), but we don't + * care (who responds) if the socket is live. + */ + s = ipc_get_active_state(cl_args.path); + if (s == IPC_STATE__LISTENING) + return 0; + + time(&now); + if (now > time_limit) + return error(_("daemon not online yet")); + + continue; + } + + else if (pid_seen == pid_child) { + /* + * The new child daemon process shutdown while + * it was starting up, so it is not listening + * on the socket. + * + * Try to ping the socket in the odd chance + * that another daemon started (or was already + * running) while our child was starting. + * + * Again, we don't care who services the socket. + */ + s = ipc_get_active_state(cl_args.path); + if (s == IPC_STATE__LISTENING) + return 0; + + /* + * We don't care about the WEXITSTATUS() nor + * any of the WIF*(status) values because + * `cmd__simple_ipc()` does the `!!result` + * trick on all function return values. + * + * So it is sufficient to just report the + * early shutdown as an error. + */ + return error(_("daemon failed to start")); + } + + else + return error(_("waitpid is confused")); + } +} + +/* + * This process will start a simple-ipc server in a background process and + * wait for it to become ready. This is like `daemonize()` but gives us + * more control and better error reporting (and makes it easier to write + * unit tests). + */ +static int daemon__start_server(void) +{ + pid_t pid_child; + int ret; + + /* + * Run the actual daemon in a background process. + */ + ret = spawn_server(&pid_child); + if (pid_child <= 0) + return ret; + + /* + * Let the parent wait for the child process to get started + * and begin listening for requests on the socket. + */ + ret = wait_for_server_startup(pid_child); + + return ret; +} + +/* + * This process will run a quick probe to see if a simple-ipc server + * is active on this path. + * + * Returns 0 if the server is alive. + */ +static int client__probe_server(void) +{ + enum ipc_active_state s; + + s = ipc_get_active_state(cl_args.path); + switch (s) { + case IPC_STATE__LISTENING: + return 0; + + case IPC_STATE__NOT_LISTENING: + return error("no server listening at '%s'", cl_args.path); + + case IPC_STATE__PATH_NOT_FOUND: + return error("path not found '%s'", cl_args.path); + + case IPC_STATE__INVALID_PATH: + return error("invalid pipe/socket name '%s'", cl_args.path); + + case IPC_STATE__OTHER_ERROR: + default: + return error("other error for '%s'", cl_args.path); + } +} + +/* + * Send an IPC command token to an already-running server daemon and + * print the response. + * + * This is a simple 1 word command/token that `test_app_cb()` (in the + * daemon process) will understand. 
+ */ +static int client__send_ipc(void) +{ + const char *command = "(no-command)"; + struct strbuf buf = STRBUF_INIT; + struct ipc_client_connect_options options + = IPC_CLIENT_CONNECT_OPTIONS_INIT; + + if (cl_args.token && *cl_args.token) + command = cl_args.token; + + options.wait_if_busy = 1; + options.wait_if_not_found = 0; + + if (!ipc_client_send_command(cl_args.path, &options, command, &buf)) { + if (buf.len) { + printf("%s\n", buf.buf); + fflush(stdout); + } + strbuf_release(&buf); + + return 0; + } + + return error("failed to send '%s' to '%s'", command, cl_args.path); +} + +/* + * Send an IPC command to an already-running server and ask it to + * shutdown. "send quit" is an async request and queues a shutdown + * event in the server, so we spin and wait here for it to actually + * shutdown to make the unit tests a little easier to write. + */ +static int client__stop_server(void) +{ + int ret; + time_t time_limit, now; + enum ipc_active_state s; + + time(&time_limit); + time_limit += cl_args.max_wait_sec; + + cl_args.token = "quit"; + + ret = client__send_ipc(); + if (ret) + return ret; + + for (;;) { + sleep_millisec(100); + + s = ipc_get_active_state(cl_args.path); + + if (s != IPC_STATE__LISTENING) { + /* + * The socket/pipe is gone and/or has stopped + * responding. Lets assume that the daemon + * process has exited too. + */ + return 0; + } + + time(&now); + if (now > time_limit) + return error(_("daemon has not shutdown yet")); + } +} + +/* + * Send an IPC command followed by ballast to confirm that a large + * message can be sent and that the kernel or pkt-line layers will + * properly chunk it and that the daemon receives the entire message. + */ +static int do_sendbytes(int bytecount, char byte, const char *path, + const struct ipc_client_connect_options *options) +{ + struct strbuf buf_send = STRBUF_INIT; + struct strbuf buf_resp = STRBUF_INIT; + + strbuf_addstr(&buf_send, "sendbytes "); + strbuf_addchars(&buf_send, byte, bytecount); + + if (!ipc_client_send_command(path, options, buf_send.buf, &buf_resp)) { + strbuf_rtrim(&buf_resp); + printf("sent:%c%08d %s\n", byte, bytecount, buf_resp.buf); + fflush(stdout); + strbuf_release(&buf_send); + strbuf_release(&buf_resp); + + return 0; + } + + return error("client failed to sendbytes(%d, '%c') to '%s'", + bytecount, byte, path); +} + +/* + * Send an IPC command with ballast to an already-running server daemon. + */ +static int client__sendbytes(void) +{ + struct ipc_client_connect_options options + = IPC_CLIENT_CONNECT_OPTIONS_INIT; + + options.wait_if_busy = 1; + options.wait_if_not_found = 0; + options.uds_disallow_chdir = 0; + + return do_sendbytes(cl_args.bytecount, cl_args.bytevalue, cl_args.path, + &options); +} + +struct multiple_thread_data { + pthread_t pthread_id; + struct multiple_thread_data *next; + const char *path; + int bytecount; + int batchsize; + int sum_errors; + int sum_good; + char letter; +}; + +static void *multiple_thread_proc(void *_multiple_thread_data) +{ + struct multiple_thread_data *d = _multiple_thread_data; + int k; + struct ipc_client_connect_options options + = IPC_CLIENT_CONNECT_OPTIONS_INIT; + + options.wait_if_busy = 1; + options.wait_if_not_found = 0; + /* + * A multi-threaded client should not be randomly calling chdir(). + * The test will pass without this restriction because the test is + * not otherwise accessing the filesystem, but it makes us honest. 
+ */ + options.uds_disallow_chdir = 1; + + trace2_thread_start("multiple"); + + for (k = 0; k < d->batchsize; k++) { + if (do_sendbytes(d->bytecount + k, d->letter, d->path, &options)) + d->sum_errors++; + else + d->sum_good++; + } + + trace2_thread_exit(); + return NULL; +} + +/* + * Start a client-side thread pool. Each thread sends a series of + * IPC requests. Each request is on a new connection to the server. + */ +static int client__multiple(void) +{ + struct multiple_thread_data *list = NULL; + int k; + int sum_join_errors = 0; + int sum_thread_errors = 0; + int sum_good = 0; + + for (k = 0; k < cl_args.nr_threads; k++) { + struct multiple_thread_data *d = xcalloc(1, sizeof(*d)); + d->next = list; + d->path = cl_args.path; + d->bytecount = cl_args.bytecount + cl_args.batchsize*(k/26); + d->batchsize = cl_args.batchsize; + d->sum_errors = 0; + d->sum_good = 0; + d->letter = 'A' + (k % 26); + + if (pthread_create(&d->pthread_id, NULL, multiple_thread_proc, d)) { + warning("failed to create thread[%d] skipping remainder", k); + free(d); + break; + } + + list = d; + } + + while (list) { + struct multiple_thread_data *d = list; + + if (pthread_join(d->pthread_id, NULL)) + sum_join_errors++; + + sum_thread_errors += d->sum_errors; + sum_good += d->sum_good; + + list = d->next; + free(d); + } + + printf("client (good %d) (join %d), (errors %d)\n", + sum_good, sum_join_errors, sum_thread_errors); + + return (sum_join_errors + sum_thread_errors) ? 1 : 0; +} + +int cmd__simple_ipc(int argc, const char **argv) +{ + const char * const simple_ipc_usage[] = { + N_("test-helper simple-ipc is-active [<name>] [<options>]"), + N_("test-helper simple-ipc run-daemon [<name>] [<threads>]"), + N_("test-helper simple-ipc start-daemon [<name>] [<threads>] [<max-wait>]"), + N_("test-helper simple-ipc stop-daemon [<name>] [<max-wait>]"), + N_("test-helper simple-ipc send [<name>] [<token>]"), + N_("test-helper simple-ipc sendbytes [<name>] [<bytecount>] [<byte>]"), + N_("test-helper simple-ipc multiple [<name>] [<threads>] [<bytecount>] [<batchsize>]"), + NULL + }; + + const char *bytevalue = NULL; + + struct option options[] = { +#ifndef GIT_WINDOWS_NATIVE + OPT_STRING(0, "name", &cl_args.path, N_("name"), N_("name or pathname of unix domain socket")), +#else + OPT_STRING(0, "name", &cl_args.path, N_("name"), N_("named-pipe name")), +#endif + OPT_INTEGER(0, "threads", &cl_args.nr_threads, N_("number of threads in server thread pool")), + OPT_INTEGER(0, "max-wait", &cl_args.max_wait_sec, N_("seconds to wait for daemon to start or stop")), + + OPT_INTEGER(0, "bytecount", &cl_args.bytecount, N_("number of bytes")), + OPT_INTEGER(0, "batchsize", &cl_args.batchsize, N_("number of requests per thread")), + + OPT_STRING(0, "byte", &bytevalue, N_("byte"), N_("ballast character")), + OPT_STRING(0, "token", &cl_args.token, N_("token"), N_("command token to send to the server")), + + OPT_END() + }; + + if (argc < 2) + usage_with_options(simple_ipc_usage, options); + + if (argc == 2 && !strcmp(argv[1], "-h")) + usage_with_options(simple_ipc_usage, options); + + if (argc == 2 && !strcmp(argv[1], "SUPPORTS_SIMPLE_IPC")) + return 0; + + cl_args.subcommand = argv[1]; + + argc--; + argv++; + + argc = parse_options(argc, argv, NULL, options, simple_ipc_usage, 0); + + if (cl_args.nr_threads < 1) + cl_args.nr_threads = 1; + if (cl_args.max_wait_sec < 0) + cl_args.max_wait_sec = 0; + if (cl_args.bytecount < 1) + cl_args.bytecount = 1; + if (cl_args.batchsize < 1) + cl_args.batchsize = 1; + + if (bytevalue && *bytevalue) + 
cl_args.bytevalue = bytevalue[0]; + + /* + * Use '!!' on all dispatch functions to map from `error()` style + * (returns -1) style to `test_must_fail` style (expects 1). This + * makes shell error messages less confusing. + */ + + if (!strcmp(cl_args.subcommand, "is-active")) + return !!client__probe_server(); + + if (!strcmp(cl_args.subcommand, "run-daemon")) + return !!daemon__run_server(); + + if (!strcmp(cl_args.subcommand, "start-daemon")) + return !!daemon__start_server(); + + /* + * Client commands follow. Ensure a server is running before + * sending any data. This might be overkill, but then again + * this is a test harness. + */ + + if (!strcmp(cl_args.subcommand, "stop-daemon")) { + if (client__probe_server()) + return 1; + return !!client__stop_server(); + } + + if (!strcmp(cl_args.subcommand, "send")) { + if (client__probe_server()) + return 1; + return !!client__send_ipc(); + } + + if (!strcmp(cl_args.subcommand, "sendbytes")) { + if (client__probe_server()) + return 1; + return !!client__sendbytes(); + } + + if (!strcmp(cl_args.subcommand, "multiple")) { + if (client__probe_server()) + return 1; + return !!client__multiple(); + } + + die("Unhandled subcommand: '%s'", cl_args.subcommand); +} +#endif diff --git a/t/helper/test-tool.c b/t/helper/test-tool.c index f97cd9f48a..287aa60023 100644 --- a/t/helper/test-tool.c +++ b/t/helper/test-tool.c @@ -65,6 +65,7 @@ static struct test_cmd cmds[] = { { "sha1", cmd__sha1 }, { "sha256", cmd__sha256 }, { "sigchain", cmd__sigchain }, + { "simple-ipc", cmd__simple_ipc }, { "strcmp-offset", cmd__strcmp_offset }, { "string-list", cmd__string_list }, { "submodule-config", cmd__submodule_config }, diff --git a/t/helper/test-tool.h b/t/helper/test-tool.h index 28072c0ad5..9ea4b31011 100644 --- a/t/helper/test-tool.h +++ b/t/helper/test-tool.h @@ -55,6 +55,7 @@ int cmd__sha1(int argc, const char **argv); int cmd__oid_array(int argc, const char **argv); int cmd__sha256(int argc, const char **argv); int cmd__sigchain(int argc, const char **argv); +int cmd__simple_ipc(int argc, const char **argv); int cmd__strcmp_offset(int argc, const char **argv); int cmd__string_list(int argc, const char **argv); int cmd__submodule_config(int argc, const char **argv); diff --git a/t/lib-rebase.sh b/t/lib-rebase.sh index 172d7459ff..dc75b83451 100644 --- a/t/lib-rebase.sh +++ b/t/lib-rebase.sh @@ -4,6 +4,7 @@ # # - override the commit message with $FAKE_COMMIT_MESSAGE # - amend the commit message with $FAKE_COMMIT_AMEND +# - copy the original commit message to a file with $FAKE_MESSAGE_COPY # - check that non-commit messages have a certain line count with $EXPECT_COUNT # - check the commit count in the commit message header with $EXPECT_HEADER_COUNT # - rewrite a rebase -i script as directed by $FAKE_LINES. @@ -14,10 +15,11 @@ # specified line. # # "<cmd> <lineno>" -- add a line with the specified command -# ("pick", "squash", "fixup", "edit", "reword" or "drop") and the -# SHA1 taken from the specified line. +# ("pick", "squash", "fixup"|"fixup_-C"|"fixup_-c", "edit", "reword" or "drop") +# and the SHA1 taken from the specified line. # -# "exec_cmd_with_args" -- add an "exec cmd with args" line. +# "_" -- add a space, like "fixup_-C" implies "fixup -C" and +# "exec_cmd_with_args" add an "exec cmd with args" line. # # "#" -- Add a comment line. 
# @@ -32,6 +34,7 @@ set_fake_editor () { exit test -z "$FAKE_COMMIT_MESSAGE" || echo "$FAKE_COMMIT_MESSAGE" > "$1" test -z "$FAKE_COMMIT_AMEND" || echo "$FAKE_COMMIT_AMEND" >> "$1" + test -z "$FAKE_MESSAGE_COPY" || cat "$1" >"$FAKE_MESSAGE_COPY" exit ;; esac @@ -50,6 +53,8 @@ set_fake_editor () { action="$line";; exec_*|x_*|break|b) echo "$line" | sed 's/_/ /g' >> "$1";; + merge_*|fixup_*) + action=$(echo "$line" | sed 's/_/ /g');; "#") echo '# comment' >> "$1";; ">") diff --git a/t/perf/p5303-many-packs.sh b/t/perf/p5303-many-packs.sh index ce0c42cc9f..35c0cbdf49 100755 --- a/t/perf/p5303-many-packs.sh +++ b/t/perf/p5303-many-packs.sh @@ -28,11 +28,18 @@ repack_into_n () { push @commits, $_ if $. % 5 == 1; } print reverse @commits; - ' "$1" >pushes + ' "$1" >pushes && # create base packfile - head -n 1 pushes | - git pack-objects --delta-base-offset --revs staging/pack + base_pack=$( + head -n 1 pushes | + git pack-objects --delta-base-offset --revs staging/pack + ) && + test_export base_pack && + + # create an empty packfile + empty_pack=$(git pack-objects staging/pack </dev/null) && + test_export empty_pack && # and then incrementals between each pair of commits last= && @@ -49,6 +56,12 @@ repack_into_n () { last=$rev done <pushes && + ( + find staging -type f -name 'pack-*.pack' | + xargs -n 1 basename | grep -v "$base_pack" && + printf "^pack-%s.pack\n" $base_pack + ) >stdin.packs + # and install the whole thing rm -f .git/objects/pack/* && mv staging/* .git/objects/pack/ @@ -91,6 +104,23 @@ do --reflog --indexed-objects --delta-base-offset \ --stdout </dev/null >/dev/null ' + + test_perf "repack with kept ($nr_packs)" ' + git pack-objects --keep-true-parents \ + --keep-pack=pack-$empty_pack.pack \ + --honor-pack-keep --non-empty --all \ + --reflog --indexed-objects --delta-base-offset \ + --stdout </dev/null >/dev/null + ' + + test_perf "repack with --stdin-packs ($nr_packs)" ' + git pack-objects \ + --keep-true-parents \ + --stdin-packs \ + --non-empty \ + --delta-base-offset \ + --stdout <stdin.packs >/dev/null + ' done # Measure pack loading with 10,000 packs. diff --git a/t/perf/p7519-fsmonitor.sh b/t/perf/p7519-fsmonitor.sh index b657564aed..5eb5044a10 100755 --- a/t/perf/p7519-fsmonitor.sh +++ b/t/perf/p7519-fsmonitor.sh @@ -216,6 +216,10 @@ test_fsmonitor_suite() { git diff ' + test_perf_w_drop_caches "diff HEAD ($DESC)" ' + git diff HEAD + ' + test_perf_w_drop_caches "diff -- 0_files ($DESC)" ' git diff -- 1_file ' diff --git a/t/t0052-simple-ipc.sh b/t/t0052-simple-ipc.sh new file mode 100755 index 0000000000..ff98be31a5 --- /dev/null +++ b/t/t0052-simple-ipc.sh @@ -0,0 +1,122 @@ +#!/bin/sh + +test_description='simple command server' + +. 
./test-lib.sh + +test-tool simple-ipc SUPPORTS_SIMPLE_IPC || { + skip_all='simple IPC not supported on this platform' + test_done +} + +stop_simple_IPC_server () { + test-tool simple-ipc stop-daemon +} + +test_expect_success 'start simple command server' ' + test_atexit stop_simple_IPC_server && + test-tool simple-ipc start-daemon --threads=8 && + test-tool simple-ipc is-active +' + +test_expect_success 'simple command server' ' + test-tool simple-ipc send --token=ping >actual && + echo pong >expect && + test_cmp expect actual +' + +test_expect_success 'servers cannot share the same path' ' + test_must_fail test-tool simple-ipc run-daemon && + test-tool simple-ipc is-active +' + +test_expect_success 'big response' ' + test-tool simple-ipc send --token=big >actual && + test_line_count -ge 10000 actual && + grep -q "big: [0]*9999\$" actual +' + +test_expect_success 'chunk response' ' + test-tool simple-ipc send --token=chunk >actual && + test_line_count -ge 10000 actual && + grep -q "big: [0]*9999\$" actual +' + +test_expect_success 'slow response' ' + test-tool simple-ipc send --token=slow >actual && + test_line_count -ge 100 actual && + grep -q "big: [0]*99\$" actual +' + +# Send an IPC with n=100,000 bytes of ballast. This should be large enough +# to force both the kernel and the pkt-line layer to chunk the message to the +# daemon and for the daemon to receive it in chunks. +# +test_expect_success 'sendbytes' ' + test-tool simple-ipc sendbytes --bytecount=100000 --byte=A >actual && + grep "sent:A00100000 rcvd:A00100000" actual +' + +# Start a series of <threads> client threads that each make <batchsize> +# IPC requests to the server. Each (<threads> * <batchsize>) request +# will open a new connection to the server and randomly bind to a server +# thread. Each client thread exits after completing its batch. So the +# total number of live client threads will be smaller than the total. +# Each request will send a message containing at least <bytecount> bytes +# of ballast. (Responses are small.) +# +# The purpose here is to test threading in the server and responding to +# many concurrent client requests (regardless of whether they come from +# 1 client process or many). And to test that the server side of the +# named pipe/socket is stable. (On Windows this means that the server +# pipe is properly recycled.) +# +# On Windows it also lets us adjust the connection timeout in the +# `ipc_client_send_command()`. +# +# Note it is easy to drive the system into failure by requesting an +# insane number of threads on client or server and/or increasing the +# per-thread batchsize or the per-request bytecount (ballast). +# On Windows these failures look like "pipe is busy" errors. +# So I've chosen fairly conservative values for now. +# +# We expect output of the form "sent:<letter><length> ..." +# With terms (7, 19, 13) we expect: +# <letter> in [A-G] +# <length> in [19+0 .. 19+(13-1)] +# and (7 * 13) successful responses. 
+# +test_expect_success 'stress test threads' ' + test-tool simple-ipc multiple \ + --threads=7 \ + --bytecount=19 \ + --batchsize=13 \ + >actual && + test_line_count = 92 actual && + grep "good 91" actual && + grep "sent:A" <actual >actual_a && + cat >expect_a <<-EOF && + sent:A00000019 rcvd:A00000019 + sent:A00000020 rcvd:A00000020 + sent:A00000021 rcvd:A00000021 + sent:A00000022 rcvd:A00000022 + sent:A00000023 rcvd:A00000023 + sent:A00000024 rcvd:A00000024 + sent:A00000025 rcvd:A00000025 + sent:A00000026 rcvd:A00000026 + sent:A00000027 rcvd:A00000027 + sent:A00000028 rcvd:A00000028 + sent:A00000029 rcvd:A00000029 + sent:A00000030 rcvd:A00000030 + sent:A00000031 rcvd:A00000031 + EOF + test_cmp expect_a actual_a +' + +test_expect_success 'stop-daemon works' ' + test-tool simple-ipc stop-daemon && + test_must_fail test-tool simple-ipc is-active && + test_must_fail test-tool simple-ipc send --token=ping +' + +test_done diff --git a/t/t2021-checkout-overwrite.sh b/t/t2021-checkout-overwrite.sh index c2ada7de37..70d69263e6 100755 --- a/t/t2021-checkout-overwrite.sh +++ b/t/t2021-checkout-overwrite.sh @@ -51,4 +51,16 @@ test_expect_success SYMLINKS 'the symlink remained' ' test -h a/b ' +test_expect_success SYMLINKS 'checkout -f must not follow symlinks when removing entries' ' + git checkout -f start && + mkdir dir && + >dir/f && + git add dir/f && + git commit -m "add dir/f" && + mv dir untracked && + ln -s untracked dir && + git checkout -f HEAD~ && + test_path_is_file untracked/f +' + test_done diff --git a/t/t3060-ls-files-with-tree.sh b/t/t3060-ls-files-with-tree.sh index 52ed665fcd..b257c792a4 100755 --- a/t/t3060-ls-files-with-tree.sh +++ b/t/t3060-ls-files-with-tree.sh @@ -47,6 +47,12 @@ test_expect_success setup ' git add . ' +test_expect_success 'usage' ' + test_expect_code 128 git ls-files --with-tree=HEAD -u && + test_expect_code 128 git ls-files --with-tree=HEAD -s && + test_expect_code 128 git ls-files --recurse-submodules --with-tree=HEAD +' + test_expect_success 'git ls-files --with-tree should succeed from subdir' ' # We have to run from a sub-directory to trigger prune_path # Then we finally get to run our --with-tree test @@ -60,4 +66,39 @@ test_expect_success \ 'git ls-files --with-tree should add entries from named tree.' 
\ 'test_cmp expected output' +test_expect_success 'no duplicates in --with-tree output' ' + git ls-files --with-tree=HEAD >actual && + sort -u actual >expected && + test_cmp expected actual +' + +test_expect_success 'setup: output in a conflict' ' + test_create_repo conflict && + test_commit -C conflict BASE file && + test_commit -C conflict A file foo && + git -C conflict reset --hard BASE && + test_commit -C conflict B file bar +' + +test_expect_success 'output in a conflict' ' + test_must_fail git -C conflict merge A B && + cat >expected <<-\EOF && + file + file + file + file + EOF + git -C conflict ls-files --with-tree=HEAD >actual && + test_cmp expected actual +' + +test_expect_success 'output with removed .git/index' ' + cat >expected <<-\EOF && + file + EOF + rm conflict/.git/index && + git -C conflict ls-files --with-tree=HEAD >actual && + test_cmp expected actual +' + test_done diff --git a/t/t3206-range-diff.sh b/t/t3206-range-diff.sh index 1b26c4c2ef..e30bc48a29 100755 --- a/t/t3206-range-diff.sh +++ b/t/t3206-range-diff.sh @@ -521,6 +521,30 @@ test_expect_success 'format-patch --range-diff as commentary' ' grep "> 1: .* new message" 0001-* ' +test_expect_success 'format-patch --range-diff reroll-count with a non-integer' ' + git format-patch --range-diff=HEAD~1 -v2.9 HEAD~1 >actual && + test_when_finished "rm v2.9-0001-*" && + test_line_count = 1 actual && + test_i18ngrep "^Range-diff:$" v2.9-0001-* && + grep "> 1: .* new message" v2.9-0001-* +' + +test_expect_success 'format-patch --range-diff reroll-count with a integer' ' + git format-patch --range-diff=HEAD~1 -v2 HEAD~1 >actual && + test_when_finished "rm v2-0001-*" && + test_line_count = 1 actual && + test_i18ngrep "^Range-diff ..* v1:$" v2-0001-* && + grep "> 1: .* new message" v2-0001-* +' + +test_expect_success 'format-patch --range-diff with v0' ' + git format-patch --range-diff=HEAD~1 -v0 HEAD~1 >actual && + test_when_finished "rm v0-0001-*" && + test_line_count = 1 actual && + test_i18ngrep "^Range-diff:$" v0-0001-* && + grep "> 1: .* new message" v0-0001-* +' + test_expect_success 'range-diff overrides diff.noprefix internally' ' git -c diff.noprefix=true range-diff HEAD^... ' diff --git a/t/t3400-rebase.sh b/t/t3400-rebase.sh index 587b408063..0bb88aa982 100755 --- a/t/t3400-rebase.sh +++ b/t/t3400-rebase.sh @@ -388,22 +388,6 @@ test_expect_success 'rebase--merge.sh and --show-current-patch' ' ) ' -test_expect_success 'rebase -c rebase.useBuiltin=false warning' ' - expected="rebase.useBuiltin support has been removed" && - - # Only warn when the legacy rebase is requested... - test_must_fail git -c rebase.useBuiltin=false rebase 2>err && - test_i18ngrep "$expected" err && - test_must_fail env GIT_TEST_REBASE_USE_BUILTIN=false git rebase 2>err && - test_i18ngrep "$expected" err && - - # ...not when we would have used the built-in anyway - test_must_fail git -c rebase.useBuiltin=true rebase 2>err && - test_must_be_empty err && - test_must_fail env GIT_TEST_REBASE_USE_BUILTIN=true git rebase 2>err && - test_must_be_empty err -' - test_expect_success 'switch to branch checked out here' ' git checkout main && git rebase main main diff --git a/t/t3415-rebase-autosquash.sh b/t/t3415-rebase-autosquash.sh index 908016c2f8..78c27496d6 100755 --- a/t/t3415-rebase-autosquash.sh +++ b/t/t3415-rebase-autosquash.sh @@ -84,8 +84,7 @@ test_auto_squash () { echo 1 >file1 && git add -u && test_tick && - git commit -m "squash! first" && - + git commit -m "squash! 
first" -m "extra para for first" && git tag $1 && test_tick && git rebase $2 -i HEAD^^^ && @@ -142,7 +141,7 @@ test_expect_success 'auto squash that matches 2 commits' ' echo 1 >file1 && git add -u && test_tick && - git commit -m "squash! first" && + git commit -m "squash! first" -m "extra para for first" && git tag final-multisquash && test_tick && git rebase --autosquash -i HEAD~4 && @@ -195,7 +194,7 @@ test_expect_success 'auto squash that matches a sha1' ' git add -u && test_tick && oid=$(git rev-parse --short HEAD^) && - git commit -m "squash! $oid" && + git commit -m "squash! $oid" -m "extra para" && git tag final-shasquash && test_tick && git rebase --autosquash -i HEAD^^^ && @@ -206,7 +205,8 @@ test_expect_success 'auto squash that matches a sha1' ' git cat-file blob HEAD^:file1 >actual && test_cmp expect actual && git cat-file commit HEAD^ >commit && - grep squash commit >actual && + ! grep "squash" commit && + grep "^extra para" commit >actual && test_line_count = 1 actual ' @@ -216,7 +216,7 @@ test_expect_success 'auto squash that matches longer sha1' ' git add -u && test_tick && oid=$(git rev-parse --short=11 HEAD^) && - git commit -m "squash! $oid" && + git commit -m "squash! $oid" -m "extra para" && git tag final-longshasquash && test_tick && git rebase --autosquash -i HEAD^^^ && @@ -227,7 +227,8 @@ test_expect_success 'auto squash that matches longer sha1' ' git cat-file blob HEAD^:file1 >actual && test_cmp expect actual && git cat-file commit HEAD^ >commit && - grep squash commit >actual && + ! grep "squash" commit && + grep "^extra para" commit >actual && test_line_count = 1 actual ' @@ -236,7 +237,7 @@ test_auto_commit_flags () { echo 1 >file1 && git add -u && test_tick && - git commit --$1 first-commit && + git commit --$1 first-commit -m "extra para for first" && git tag final-commit-$1 && test_tick && git rebase --autosquash -i HEAD^^^ && @@ -264,11 +265,11 @@ test_auto_fixup_fixup () { echo 1 >file1 && git add -u && test_tick && - git commit -m "$1! first" && + git commit -m "$1! first" -m "extra para for first" && echo 2 >file1 && git add -u && test_tick && - git commit -m "$1! $2! first" && + git commit -m "$1! $2! first" -m "second extra para for first" && git tag "final-$1-$2" && test_tick && ( @@ -329,12 +330,12 @@ test_expect_success 'autosquash with custom inst format' ' git add -u && test_tick && oid=$(git rev-parse --short HEAD^) && - git commit -m "squash! $oid" && + git commit -m "squash! $oid" -m "extra para for first" && echo 1 >file1 && git add -u && test_tick && subject=$(git log -n 1 --format=%s HEAD~2) && - git commit -m "squash! $subject" && + git commit -m "squash! $subject" -m "second extra para for first" && git tag final-squash-instFmt && test_tick && git rebase --autosquash -i HEAD~4 && @@ -345,8 +346,9 @@ test_expect_success 'autosquash with custom inst format' ' git cat-file blob HEAD^:file1 >actual && test_cmp expect actual && git cat-file commit HEAD^ >commit && - grep squash commit >actual && - test_line_count = 2 actual + ! 
grep "squash" commit && + grep first commit >actual && + test_line_count = 3 actual ' test_expect_success 'autosquash with empty custom instructionFormat' ' diff --git a/t/t3437-rebase-fixup-options.sh b/t/t3437-rebase-fixup-options.sh new file mode 100755 index 0000000000..d0bdc7ed02 --- /dev/null +++ b/t/t3437-rebase-fixup-options.sh @@ -0,0 +1,211 @@ +#!/bin/sh +# +# Copyright (c) 2018 Phillip Wood +# + +test_description='git rebase interactive fixup options + +This test checks the "fixup [-C|-c]" command of rebase interactive. +In addition to amending the contents of the commit, "fixup -C" +replaces the original commit message with the message of the fixup +commit. "fixup -c" also replaces the original message, but opens the +editor to allow the user to edit the message before committing. Similar +to the "fixup" command that works with "fixup!", "fixup -C" works with +"amend!" upon --autosquash. +' + +. ./test-lib.sh + +. "$TEST_DIRECTORY"/lib-rebase.sh + +EMPTY="" + +# test_commit_message <rev> -m <msg> +# test_commit_message <rev> <path> +# Verify that the commit message of <rev> matches +# <msg> or the content of <path>. +test_commit_message () { + git show --no-patch --pretty=format:%B "$1" >actual && + case "$2" in + -m) + echo "$3" >expect && + test_cmp expect actual ;; + *) + test_cmp "$2" actual ;; + esac +} + +get_author () { + rev="$1" && + git log -1 --pretty=format:"%an %ae %at" "$rev" +} + +test_expect_success 'setup' ' + cat >message <<-EOF && + amend! B + $EMPTY + new subject + $EMPTY + new + body + EOF + + test_commit A A && + test_commit B B && + get_author HEAD >expected-author && + ORIG_AUTHOR_NAME="$GIT_AUTHOR_NAME" && + ORIG_AUTHOR_EMAIL="$GIT_AUTHOR_EMAIL" && + GIT_AUTHOR_NAME="Amend Author" && + GIT_AUTHOR_EMAIL="amend@example.com" && + test_commit "$(cat message)" A A1 A1 && + test_commit A2 A && + test_commit A3 A && + GIT_AUTHOR_NAME="$ORIG_AUTHOR_NAME" && + GIT_AUTHOR_EMAIL="$ORIG_AUTHOR_EMAIL" && + git checkout -b conflicts-branch A && + test_commit conflicts A && + + set_fake_editor && + git checkout -b branch B && + echo B1 >B && + test_tick && + git commit --fixup=HEAD -a && + git tag B1 && + test_tick && + FAKE_COMMIT_AMEND="edited 1" git commit --fixup=reword:B && + test_tick && + FAKE_COMMIT_AMEND="edited 2" git commit --fixup=reword:HEAD && + echo B2 >B && + test_tick && + FAKE_COMMIT_AMEND="edited squash" git commit --squash=HEAD -a && + git tag B2 && + echo B3 >B && + test_tick && + FAKE_COMMIT_AMEND="edited 3" git commit -a --fixup=amend:HEAD^ && + git tag B3 && + + GIT_AUTHOR_NAME="Rebase Author" && + GIT_AUTHOR_EMAIL="rebase.author@example.com" && + GIT_COMMITTER_NAME="Rebase Committer" && + GIT_COMMITTER_EMAIL="rebase.committer@example.com" +' + +test_expect_success 'simple fixup -C works' ' + test_when_finished "test_might_fail git rebase --abort" && + git checkout --detach A2 && + FAKE_LINES="1 fixup_-C 2" git rebase -i B && + test_cmp_rev HEAD^ B && + test_cmp_rev HEAD^{tree} A2^{tree} && + test_commit_message HEAD -m "A2" +' + +test_expect_success 'simple fixup -c works' ' + test_when_finished "test_might_fail git rebase --abort" && + git checkout --detach A2 && + git log -1 --pretty=format:%B >expected-fixup-message && + test_write_lines "" "Modified A2" >>expected-fixup-message && + FAKE_LINES="1 fixup_-c 2" \ + FAKE_COMMIT_AMEND="Modified A2" \ + git rebase -i B && + test_cmp_rev HEAD^ B && + test_cmp_rev HEAD^{tree} A2^{tree} && + test_commit_message HEAD expected-fixup-message +' + +test_expect_success 'fixup -C removes amend! 
from message' ' + test_when_finished "test_might_fail git rebase --abort" && + git checkout --detach A1 && + git log -1 --pretty=format:%b >expected-message && + FAKE_LINES="1 fixup_-C 2" git rebase -i A && + test_cmp_rev HEAD^ A && + test_cmp_rev HEAD^{tree} A1^{tree} && + test_commit_message HEAD expected-message && + get_author HEAD >actual-author && + test_cmp expected-author actual-author +' + +test_expect_success 'fixup -C with conflicts gives correct message' ' + test_when_finished "test_might_fail git rebase --abort" && + git checkout --detach A1 && + git log -1 --pretty=format:%b >expected-message && + test_write_lines "" "edited" >>expected-message && + test_must_fail env FAKE_LINES="1 fixup_-C 2" git rebase -i conflicts && + git checkout --theirs -- A && + git add A && + FAKE_COMMIT_AMEND=edited git rebase --continue && + test_cmp_rev HEAD^ conflicts && + test_cmp_rev HEAD^{tree} A1^{tree} && + test_commit_message HEAD expected-message && + get_author HEAD >actual-author && + test_cmp expected-author actual-author +' + +test_expect_success 'skipping fixup -C after fixup gives correct message' ' + test_when_finished "test_might_fail git rebase --abort" && + git checkout --detach A3 && + test_must_fail env FAKE_LINES="1 fixup 2 fixup_-C 4" git rebase -i A && + git reset --hard && + FAKE_COMMIT_AMEND=edited git rebase --continue && + test_commit_message HEAD -m "B" +' + +test_expect_success 'sequence of fixup, fixup -C & squash --signoff works' ' + git checkout --detach B3 && + FAKE_LINES="1 fixup 2 fixup_-C 3 fixup_-C 4 squash 5 fixup_-C 6" \ + FAKE_COMMIT_AMEND=squashed \ + FAKE_MESSAGE_COPY=actual-squash-message \ + git -c commit.status=false rebase -ik --signoff A && + git diff-tree --exit-code --patch HEAD B3 -- && + test_cmp_rev HEAD^ A && + test_i18ncmp "$TEST_DIRECTORY/t3437/expected-squash-message" \ + actual-squash-message +' + +test_expect_success 'first fixup -C commented out in sequence fixup fixup -C fixup -C' ' + test_when_finished "test_might_fail git rebase --abort" && + git checkout --detach B2~ && + git log -1 --pretty=format:%b >expected-message && + FAKE_LINES="1 fixup 2 fixup_-C 3 fixup_-C 4" git rebase -i A && + test_cmp_rev HEAD^ A && + test_commit_message HEAD expected-message +' + +test_expect_success 'multiple fixup -c opens editor once' ' + test_when_finished "test_might_fail git rebase --abort" && + git checkout --detach A3 && + git log -1 --pretty=format:%B >expected-message && + test_write_lines "" "Modified-A3" >>expected-message && + FAKE_COMMIT_AMEND="Modified-A3" \ + FAKE_LINES="1 fixup_-C 2 fixup_-c 3 fixup_-c 4" \ + EXPECT_HEADER_COUNT=4 \ + git rebase -i A && + test_cmp_rev HEAD^ A && + get_author HEAD >actual-author && + test_cmp expected-author actual-author && + test_commit_message HEAD expected-message +' + +test_expect_success 'sequence squash, fixup & fixup -c gives combined message' ' + test_when_finished "test_might_fail git rebase --abort" && + git checkout --detach A3 && + FAKE_LINES="1 squash 2 fixup 3 fixup_-c 4" \ + FAKE_MESSAGE_COPY=actual-combined-message \ + git -c commit.status=false rebase -i A && + test_i18ncmp "$TEST_DIRECTORY/t3437/expected-combined-message" \ + actual-combined-message && + test_cmp_rev HEAD^ A +' + +test_expect_success 'fixup -C works upon --autosquash with amend!' 
' + git checkout --detach B3 && + FAKE_COMMIT_AMEND=squashed \ + FAKE_MESSAGE_COPY=actual-squash-message \ + git -c commit.status=false rebase -ik --autosquash \ + --signoff A && + git diff-tree --exit-code --patch HEAD B3 -- && + test_cmp_rev HEAD^ A && + test_i18ncmp "$TEST_DIRECTORY/t3437/expected-squash-message" \ + actual-squash-message +' + +test_done diff --git a/t/t3437/expected-combined-message b/t/t3437/expected-combined-message new file mode 100644 index 0000000000..a26cfb2fa9 --- /dev/null +++ b/t/t3437/expected-combined-message @@ -0,0 +1,21 @@ +# This is a combination of 4 commits. +# This is the 1st commit message: + +B + +# This is the commit message #2: + +# amend! B + +new subject + +new +body + +# The commit message #3 will be skipped: + +# A2 + +# This is the commit message #4: + +A3 diff --git a/t/t3437/expected-squash-message b/t/t3437/expected-squash-message new file mode 100644 index 0000000000..ab2434f90e --- /dev/null +++ b/t/t3437/expected-squash-message @@ -0,0 +1,51 @@ +# This is a combination of 6 commits. +# The 1st commit message will be skipped: + +# B +# +# Signed-off-by: Rebase Committer <rebase.committer@example.com> + +# The commit message #2 will be skipped: + +# fixup! B + +# The commit message #3 will be skipped: + +# amend! B +# +# B +# +# edited 1 +# +# Signed-off-by: Rebase Committer <rebase.committer@example.com> + +# This is the commit message #4: + +# amend! amend! B + +B + +edited 1 + +edited 2 + +Signed-off-by: Rebase Committer <rebase.committer@example.com> + +# This is the commit message #5: + +# squash! amend! amend! B + +edited squash + +# This is the commit message #6: + +# amend! amend! amend! B + +B + +edited 1 + +edited 2 + +edited 3 +squashed diff --git a/t/t3800-mktag.sh b/t/t3800-mktag.sh index 60a666da59..6275c98523 100755 --- a/t/t3800-mktag.sh +++ b/t/t3800-mktag.sh @@ -17,7 +17,7 @@ check_verify_failure () { grep '$2' message && if test '$3' != '--no-strict' then - test_must_fail git mktag --no-strict <tag.sig 2>message.no-strict &&xb + test_must_fail git mktag --no-strict <tag.sig 2>message.no-strict && grep '$2' message.no-strict fi " diff --git a/t/t3900-i18n-commit.sh b/t/t3900-i18n-commit.sh index d277a9f4b7..bfab245eb3 100755 --- a/t/t3900-i18n-commit.sh +++ b/t/t3900-i18n-commit.sh @@ -226,10 +226,6 @@ test_commit_autosquash_multi_encoding () { git rev-list HEAD >actual && test_line_count = 3 actual && iconv -f $old -t UTF-8 "$TEST_DIRECTORY"/t3900/$msg >expect && - if test $flag = squash; then - subject="$(head -1 expect)" && - printf "\nsquash! %s\n" "$subject" >>expect - fi && git cat-file commit HEAD^ >raw && (sed "1,/^$/d" raw | iconv -f $new -t utf-8) >actual && test_cmp expect actual diff --git a/t/t4014-format-patch.sh b/t/t4014-format-patch.sh index cdd3154e70..712d4b5ddf 100755 --- a/t/t4014-format-patch.sh +++ b/t/t4014-format-patch.sh @@ -386,6 +386,30 @@ test_expect_success 'reroll count (-v)' ' ! grep -v "^Subject: \[PATCH v4 [0-3]/3\] " subjects ' +test_expect_success 'reroll count (-v) with a fractional number' ' + rm -fr patches && + git format-patch -o patches --cover-letter -v4.4 main..side >list && + ! grep -v "^patches/v4.4-000[0-3]-" list && + sed -n -e "/^Subject: /p" $(cat list) >subjects && + ! grep -v "^Subject: \[PATCH v4.4 [0-3]/3\] " subjects +' + +test_expect_success 'reroll (-v) count with a non number' ' + rm -fr patches && + git format-patch -o patches --cover-letter -v4rev2 main..side >list && + ! 
grep -v "^patches/v4rev2-000[0-3]-" list && + sed -n -e "/^Subject: /p" $(cat list) >subjects && + ! grep -v "^Subject: \[PATCH v4rev2 [0-3]/3\] " subjects +' + +test_expect_success 'reroll (-v) count with a non-pathname character' ' + rm -fr patches && + git format-patch -o patches --cover-letter -v4---..././../--1/.2// main..side >list && + ! grep -v "patches/v4-\.-\.-\.-1-\.2-000[0-3]-" list && + sed -n -e "/^Subject: /p" $(cat list) >subjects && + ! grep -v "^Subject: \[PATCH v4---\.\.\./\./\.\./--1/\.2// [0-3]/3\] " subjects +' + check_threading () { expect="$1" && shift && @@ -2255,6 +2279,16 @@ test_expect_success 'interdiff: reroll-count' ' test_i18ngrep "^Interdiff ..* v1:$" v2-0000-cover-letter.patch ' +test_expect_success 'interdiff: reroll-count with a non-integer' ' + git format-patch --cover-letter --interdiff=boop~2 -v2.2 -1 boop && + test_i18ngrep "^Interdiff:$" v2.2-0000-cover-letter.patch +' + +test_expect_success 'interdiff: reroll-count with a integer' ' + git format-patch --cover-letter --interdiff=boop~2 -v2 -1 boop && + test_i18ngrep "^Interdiff ..* v1:$" v2-0000-cover-letter.patch +' + test_expect_success 'interdiff: solo-patch' ' cat >expect <<-\EOF && +fleep diff --git a/t/t4053-diff-no-index.sh b/t/t4053-diff-no-index.sh index 0168946b63..3feadf0e35 100755 --- a/t/t4053-diff-no-index.sh +++ b/t/t4053-diff-no-index.sh @@ -16,6 +16,11 @@ test_expect_success 'setup' ' echo 1 >non/git/b ' +test_expect_success 'git diff --no-index --exit-code' ' + git diff --no-index --exit-code a/1 non/git/a && + test_expect_code 1 git diff --no-index --exit-code a/1 a/2 +' + test_expect_success 'git diff --no-index directories' ' test_expect_code 1 git diff --no-index a b >cnt && test_line_count = 14 cnt @@ -144,4 +149,59 @@ test_expect_success 'diff --no-index allows external diff' ' test_cmp expect actual ' +test_expect_success 'diff --no-index normalizes mode: no changes' ' + echo foo >x && + cp x y && + git diff --no-index x y >out && + test_must_be_empty out +' + +test_expect_success POSIXPERM 'diff --no-index normalizes mode: chmod +x' ' + chmod +x y && + cat >expected <<-\EOF && + diff --git a/x b/y + old mode 100644 + new mode 100755 + EOF + test_expect_code 1 git diff --no-index x y >actual && + test_cmp expected actual +' + +test_expect_success POSIXPERM 'diff --no-index normalizes: mode not like git mode' ' + chmod 666 x && + chmod 777 y && + cat >expected <<-\EOF && + diff --git a/x b/y + old mode 100644 + new mode 100755 + EOF + test_expect_code 1 git diff --no-index x y >actual && + test_cmp expected actual +' + +test_expect_success POSIXPERM,SYMLINKS 'diff --no-index normalizes: mode not like git mode (symlink)' ' + ln -s y z && + X_OID=$(git hash-object --stdin <x) && + Z_OID=$(printf y | git hash-object --stdin) && + cat >expected <<-EOF && + diff --git a/x b/x + deleted file mode 100644 + index $X_OID..$ZERO_OID + --- a/x + +++ /dev/null + @@ -1 +0,0 @@ + -foo + diff --git a/z b/z + new file mode 120000 + index $ZERO_OID..$Z_OID + --- /dev/null + +++ b/z + @@ -0,0 +1 @@ + +y + \ No newline at end of file + EOF + test_expect_code 1 git -c core.abbrev=no diff --no-index x z >actual && + test_cmp expected actual +' + test_done diff --git a/t/t5300-pack-object.sh b/t/t5300-pack-object.sh index d586fdc7a9..2fc5e68250 100755 --- a/t/t5300-pack-object.sh +++ b/t/t5300-pack-object.sh @@ -427,7 +427,8 @@ test_expect_success 'index-pack --strict <pack> works in non-repo' ' test_path_is_file foo.idx ' -test_expect_success !PTHREADS 'index-pack --threads=N or pack.threads=N 
warns when no pthreads' ' +test_expect_success !PTHREADS,!FAIL_PREREQS \ + 'index-pack --threads=N or pack.threads=N warns when no pthreads' ' test_must_fail git index-pack --threads=2 2>err && grep ^warning: err >warnings && test_line_count = 1 warnings && @@ -445,7 +446,8 @@ test_expect_success !PTHREADS 'index-pack --threads=N or pack.threads=N warns wh grep -F "no threads support, ignoring pack.threads" err ' -test_expect_success !PTHREADS 'pack-objects --threads=N or pack.threads=N warns when no pthreads' ' +test_expect_success !PTHREADS,!FAIL_PREREQS \ + 'pack-objects --threads=N or pack.threads=N warns when no pthreads' ' git pack-objects --threads=2 --stdout --all </dev/null >/dev/null 2>err && grep ^warning: err >warnings && test_line_count = 1 warnings && @@ -532,4 +534,139 @@ test_expect_success 'prefetch objects' ' test_line_count = 1 donelines ' +test_expect_success 'setup for --stdin-packs tests' ' + git init stdin-packs && + ( + cd stdin-packs && + + test_commit A && + test_commit B && + test_commit C && + + for id in A B C + do + git pack-objects .git/objects/pack/pack-$id \ + --incremental --revs <<-EOF + refs/tags/$id + EOF + done && + + ls -la .git/objects/pack + ) +' + +test_expect_success '--stdin-packs with excluded packs' ' + ( + cd stdin-packs && + + PACK_A="$(basename .git/objects/pack/pack-A-*.pack)" && + PACK_B="$(basename .git/objects/pack/pack-B-*.pack)" && + PACK_C="$(basename .git/objects/pack/pack-C-*.pack)" && + + git pack-objects test --stdin-packs <<-EOF && + $PACK_A + ^$PACK_B + $PACK_C + EOF + + ( + git show-index <$(ls .git/objects/pack/pack-A-*.idx) && + git show-index <$(ls .git/objects/pack/pack-C-*.idx) + ) >expect.raw && + git show-index <$(ls test-*.idx) >actual.raw && + + cut -d" " -f2 <expect.raw | sort >expect && + cut -d" " -f2 <actual.raw | sort >actual && + test_cmp expect actual + ) +' + +test_expect_success '--stdin-packs is incompatible with --filter' ' + ( + cd stdin-packs && + test_must_fail git pack-objects --stdin-packs --stdout \ + --filter=blob:none </dev/null 2>err && + test_i18ngrep "cannot use --filter with --stdin-packs" err + ) +' + +test_expect_success '--stdin-packs is incompatible with --revs' ' + ( + cd stdin-packs && + test_must_fail git pack-objects --stdin-packs --revs out \ + </dev/null 2>err && + test_i18ngrep "cannot use internal rev list with --stdin-packs" err + ) +' + +test_expect_success '--stdin-packs with loose objects' ' + ( + cd stdin-packs && + + PACK_A="$(basename .git/objects/pack/pack-A-*.pack)" && + PACK_B="$(basename .git/objects/pack/pack-B-*.pack)" && + PACK_C="$(basename .git/objects/pack/pack-C-*.pack)" && + + test_commit D && # loose + + git pack-objects test2 --stdin-packs --unpacked <<-EOF && + $PACK_A + ^$PACK_B + $PACK_C + EOF + + ( + git show-index <$(ls .git/objects/pack/pack-A-*.idx) && + git show-index <$(ls .git/objects/pack/pack-C-*.idx) && + git rev-list --objects --no-object-names \ + refs/tags/C..refs/tags/D + + ) >expect.raw && + ls -la . 
&& + git show-index <$(ls test2-*.idx) >actual.raw && + + cut -d" " -f2 <expect.raw | sort >expect && + cut -d" " -f2 <actual.raw | sort >actual && + test_cmp expect actual + ) +' + +test_expect_success '--stdin-packs with broken links' ' + ( + cd stdin-packs && + + # make an unreachable object with a bogus parent + git cat-file -p HEAD >commit && + sed "s/$(git rev-parse HEAD^)/$(test_oid zero)/" <commit | + git hash-object -w -t commit --stdin >in && + + git pack-objects .git/objects/pack/pack-D <in && + + PACK_A="$(basename .git/objects/pack/pack-A-*.pack)" && + PACK_B="$(basename .git/objects/pack/pack-B-*.pack)" && + PACK_C="$(basename .git/objects/pack/pack-C-*.pack)" && + PACK_D="$(basename .git/objects/pack/pack-D-*.pack)" && + + git pack-objects test3 --stdin-packs --unpacked <<-EOF && + $PACK_A + ^$PACK_B + $PACK_C + $PACK_D + EOF + + ( + git show-index <$(ls .git/objects/pack/pack-A-*.idx) && + git show-index <$(ls .git/objects/pack/pack-C-*.idx) && + git show-index <$(ls .git/objects/pack/pack-D-*.idx) && + git rev-list --objects --no-object-names \ + refs/tags/C..refs/tags/D + ) >expect.raw && + git show-index <$(ls test3-*.idx) >actual.raw && + + cut -d" " -f2 <expect.raw | sort >expect && + cut -d" " -f2 <actual.raw | sort >actual && + test_cmp expect actual + ) +' + test_done diff --git a/t/t6114-keep-packs.sh b/t/t6114-keep-packs.sh new file mode 100755 index 0000000000..9239d8aa46 --- /dev/null +++ b/t/t6114-keep-packs.sh @@ -0,0 +1,69 @@ +#!/bin/sh + +test_description='rev-list with .keep packs' +. ./test-lib.sh + +test_expect_success 'setup' ' + test_commit loose && + test_commit packed && + test_commit kept && + + KEPT_PACK=$(git pack-objects --revs .git/objects/pack/pack <<-EOF + refs/tags/kept + ^refs/tags/packed + EOF + ) && + MISC_PACK=$(git pack-objects --revs .git/objects/pack/pack <<-EOF + refs/tags/packed + ^refs/tags/loose + EOF + ) && + + touch .git/objects/pack/pack-$KEPT_PACK.keep +' + +rev_list_objects () { + git rev-list "$@" >out && + sort out +} + +idx_objects () { + git show-index <$1 >expect-idx && + cut -d" " -f2 <expect-idx | sort +} + +test_expect_success '--no-kept-objects excludes trees and blobs in .keep packs' ' + rev_list_objects --objects --all --no-object-names >kept && + rev_list_objects --objects --all --no-object-names --no-kept-objects >no-kept && + + idx_objects .git/objects/pack/pack-$KEPT_PACK.idx >expect && + comm -3 kept no-kept >actual && + + test_cmp expect actual +' + +test_expect_success '--no-kept-objects excludes kept non-MIDX object' ' + test_config core.multiPackIndex true && + + # Create a pack with just the commit object in pack, and do not mark it + # as kept (even though it appears in $KEPT_PACK, which does have a .keep + # file). + MIDX_PACK=$(git pack-objects .git/objects/pack/pack <<-EOF + $(git rev-parse kept) + EOF + ) && + + # Write a MIDX containing all packs, but use the version of the commit + # at "kept" in a non-kept pack by touching $MIDX_PACK. 
+ touch .git/objects/pack/pack-$MIDX_PACK.pack && + git multi-pack-index write && + + rev_list_objects --objects --no-object-names --no-kept-objects HEAD >actual && + ( + idx_objects .git/objects/pack/pack-$MISC_PACK.idx && + git rev-list --objects --no-object-names refs/tags/loose + ) | sort >expect && + test_cmp expect actual +' + +test_done diff --git a/t/t7007-show.sh b/t/t7007-show.sh index 42d3db6246..d6cc69e0f2 100755 --- a/t/t7007-show.sh +++ b/t/t7007-show.sh @@ -38,6 +38,45 @@ test_expect_success 'showing two commits' ' test_cmp expect actual.filtered ' +test_expect_success 'showing a tree' ' + cat >expected <<-EOF && + tree main1: + + main1.t + EOF + git show main1: >actual && + test_cmp expected actual +' + +test_expect_success 'showing two trees' ' + cat >expected <<-EOF && + tree main1^{tree} + + main1.t + + tree main2^{tree} + + main1.t + main2.t + EOF + git show main1^{tree} main2^{tree} >actual && + test_cmp expected actual +' + +test_expect_success 'showing a tree is not recursive' ' + git worktree add not-recursive main1 && + mkdir not-recursive/a && + test_commit -C not-recursive a/file && + cat >expected <<-EOF && + tree HEAD^{tree} + + a/ + main1.t + EOF + git -C not-recursive show HEAD^{tree} >actual && + test_cmp expected actual +' + test_expect_success 'showing a range walks (linear)' ' cat >expect <<-EOF && commit $(git rev-parse main3) diff --git a/t/t7500-commit-template-squash-signoff.sh b/t/t7500-commit-template-squash-signoff.sh index e41ac18e7e..9092db5fdc 100755 --- a/t/t7500-commit-template-squash-signoff.sh +++ b/t/t7500-commit-template-squash-signoff.sh @@ -9,6 +9,8 @@ Tests for template, signoff, squash and -F functions.' . ./test-lib.sh +. "$TEST_DIRECTORY"/lib-rebase.sh + commit_msg_is () { expect=commit_msg_is.expect actual=commit_msg_is.actual @@ -279,6 +281,163 @@ test_expect_success 'commit --fixup -m"something" -m"extra"' ' extra" ' +get_commit_msg () { + rev="$1" && + git log -1 --pretty=format:"%B" "$rev" +} + +test_expect_success 'commit --fixup=amend: creates amend! commit' ' + commit_for_rebase_autosquash_setup && + cat >expected <<-EOF && + amend! $(git log -1 --format=%s HEAD~) + + $(get_commit_msg HEAD~) + + edited + EOF + ( + set_fake_editor && + FAKE_COMMIT_AMEND="edited" \ + git commit --fixup=amend:HEAD~ + ) && + get_commit_msg HEAD >actual && + test_cmp expected actual +' + +test_expect_success '--fixup=amend: --only ignores staged changes' ' + commit_for_rebase_autosquash_setup && + cat >expected <<-EOF && + amend! $(git log -1 --format=%s HEAD~) + + $(get_commit_msg HEAD~) + + edited + EOF + ( + set_fake_editor && + FAKE_COMMIT_AMEND="edited" \ + git commit --fixup=amend:HEAD~ --only + ) && + get_commit_msg HEAD >actual && + test_cmp expected actual && + test_cmp_rev HEAD@{1}^{tree} HEAD^{tree} && + test_cmp_rev HEAD@{1} HEAD^ && + test_expect_code 1 git diff --cached --exit-code && + git cat-file blob :foo >actual && + test_cmp foo actual +' + +test_expect_success '--fixup=reword: ignores staged changes' ' + commit_for_rebase_autosquash_setup && + cat >expected <<-EOF && + amend! 
$(git log -1 --format=%s HEAD~) + + $(get_commit_msg HEAD~) + + edited + EOF + ( + set_fake_editor && + FAKE_COMMIT_AMEND="edited" \ + git commit --fixup=reword:HEAD~ + ) && + get_commit_msg HEAD >actual && + test_cmp expected actual && + test_cmp_rev HEAD@{1}^{tree} HEAD^{tree} && + test_cmp_rev HEAD@{1} HEAD^ && + test_expect_code 1 git diff --cached --exit-code && + git cat-file blob :foo >actual && + test_cmp foo actual +' + +test_expect_success '--fixup=reword: error out with -m option' ' + commit_for_rebase_autosquash_setup && + echo "fatal: cannot combine -m with --fixup:reword" >expect && + test_must_fail git commit --fixup=reword:HEAD~ -m "reword commit message" 2>actual && + test_cmp expect actual +' + +test_expect_success '--fixup=amend: error out with -m option' ' + commit_for_rebase_autosquash_setup && + echo "fatal: cannot combine -m with --fixup:amend" >expect && + test_must_fail git commit --fixup=amend:HEAD~ -m "amend commit message" 2>actual && + test_cmp expect actual +' + +test_expect_success 'consecutive amend! commits remove amend! line from commit msg body' ' + commit_for_rebase_autosquash_setup && + cat >expected <<-EOF && + amend! amend! $(git log -1 --format=%s HEAD~) + + $(get_commit_msg HEAD~) + + edited 1 + + edited 2 + EOF + echo "reword new commit message" >actual && + ( + set_fake_editor && + FAKE_COMMIT_AMEND="edited 1" \ + git commit --fixup=reword:HEAD~ && + FAKE_COMMIT_AMEND="edited 2" \ + git commit --fixup=reword:HEAD + ) && + get_commit_msg HEAD >actual && + test_cmp expected actual +' + +test_expect_success 'deny to create amend! commit if its commit msg body is empty' ' + commit_for_rebase_autosquash_setup && + echo "Aborting commit due to empty commit message body." >expected && + ( + set_fake_editor && + test_must_fail env FAKE_COMMIT_MESSAGE="amend! target message subject line" \ + git commit --fixup=amend:HEAD~ 2>actual + ) && + test_cmp expected actual +' + +test_expect_success 'amend! commit allows empty commit msg body with --allow-empty-message' ' + commit_for_rebase_autosquash_setup && + cat >expected <<-EOF && + amend! $(git log -1 --format=%s HEAD~) + EOF + ( + set_fake_editor && + FAKE_COMMIT_MESSAGE="amend! target message subject line" \ + git commit --fixup=amend:HEAD~ --allow-empty-message && + get_commit_msg HEAD >actual + ) && + test_cmp expected actual +' + +test_fixup_reword_opt () { + test_expect_success C_LOCALE_OUTPUT "--fixup=reword: incompatible with $1" " + echo 'fatal: reword option of --fixup is mutually exclusive with'\ + '--patch/--interactive/--all/--include/--only' >expect && + test_must_fail git commit --fixup=reword:HEAD~ $1 2>actual && + test_cmp expect actual + " +} + +for opt in --all --include --only --interactive --patch +do + test_fixup_reword_opt $opt +done + +test_expect_success '--fixup=reword: gives error with pathspec' ' + commit_for_rebase_autosquash_setup && + echo "fatal: cannot combine reword option of --fixup with path '\''foo'\''" >expect && + test_must_fail git commit --fixup=reword:HEAD~ -- foo 2>actual && + test_cmp expect actual +' + +test_expect_success '--fixup=reword: -F gives error message' ' + echo "fatal: Only one of -c/-C/-F/--fixup can be used." 
>expect && + test_must_fail git commit --fixup=reword:HEAD~ -F msg 2>actual && + test_cmp expect actual +' test_expect_success 'commit --squash works with -F' ' commit_for_rebase_autosquash_setup && diff --git a/t/t7703-repack-geometric.sh b/t/t7703-repack-geometric.sh new file mode 100755 index 0000000000..5ccaa440e0 --- /dev/null +++ b/t/t7703-repack-geometric.sh @@ -0,0 +1,183 @@ +#!/bin/sh + +test_description='git repack --geometric works correctly' + +. ./test-lib.sh + +GIT_TEST_MULTI_PACK_INDEX=0 + +objdir=.git/objects +midx=$objdir/pack/multi-pack-index + +test_expect_success '--geometric with no packs' ' + git init geometric && + test_when_finished "rm -fr geometric" && + ( + cd geometric && + + git repack --geometric 2 >out && + test_i18ngrep "Nothing new to pack" out + ) +' + +test_expect_success '--geometric with one pack' ' + git init geometric && + test_when_finished "rm -fr geometric" && + ( + cd geometric && + + test_commit "base" && + git repack -d && + + git repack --geometric 2 >out && + + test_i18ngrep "Nothing new to pack" out + ) +' + +test_expect_success '--geometric with an intact progression' ' + git init geometric && + test_when_finished "rm -fr geometric" && + ( + cd geometric && + + # These packs already form a geometric progression. + test_commit_bulk --start=1 1 && # 3 objects + test_commit_bulk --start=2 2 && # 6 objects + test_commit_bulk --start=4 4 && # 12 objects + + find $objdir/pack -name "*.pack" | sort >expect && + git repack --geometric 2 -d && + find $objdir/pack -name "*.pack" | sort >actual && + + test_cmp expect actual + ) +' + +test_expect_success '--geometric with loose objects' ' + git init geometric && + test_when_finished "rm -fr geometric" && + ( + cd geometric && + + # These packs already form a geometric progression. + test_commit_bulk --start=1 1 && # 3 objects + test_commit_bulk --start=2 2 && # 6 objects + # The loose objects are packed together, breaking the + # progression. + test_commit loose && # 3 objects + + find $objdir/pack -name "*.pack" | sort >before && + git repack --geometric 2 -d && + find $objdir/pack -name "*.pack" | sort >after && + + comm -13 before after >new && + comm -23 before after >removed && + + test_line_count = 1 new && + test_must_be_empty removed && + + git repack --geometric 2 -d && + find $objdir/pack -name "*.pack" | sort >after && + + # The progression (3, 3, 6) is combined into one new pack. + test_line_count = 1 after + ) +' + +test_expect_success '--geometric with small-pack rollup' ' + git init geometric && + test_when_finished "rm -fr geometric" && + ( + cd geometric && + + test_commit_bulk --start=1 1 && # 3 objects + test_commit_bulk --start=2 1 && # 3 objects + find $objdir/pack -name "*.pack" | sort >small && + test_commit_bulk --start=3 4 && # 12 objects + test_commit_bulk --start=7 8 && # 24 objects + find $objdir/pack -name "*.pack" | sort >before && + + git repack --geometric 2 -d && + + # Three packs in total; two of the existing large ones, and one + # new one. 
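A quick way to sanity-check the pack counts asserted in these geometric tests is to model the factor-2 rollup rule they exercise. The standalone C sketch below is a simplified illustration, not git's actual implementation: treating each pack's object count as its size, absorb packs from the smallest upward while the next pack is still smaller than twice the running total; if at least two packs are absorbed, a single run combines them into one new pack and leaves the rest untouched.

#include <stdio.h>

/*
 * Illustration only: simplified model of the factor-2 rollup decision.
 * "counts" holds per-pack object counts, smallest first.  Returns how
 * many of the smallest packs would be rolled up into one new pack
 * (0 means the progression is already intact).
 */
static int rollup_count(const int *counts, int n, int factor)
{
        int absorbed = 1, sum = counts[0], i;

        for (i = 1; i < n; i++) {
                if (counts[i] >= factor * sum)
                        break;
                sum += counts[i];
                absorbed++;
        }
        return absorbed >= 2 ? absorbed : 0;
}

int main(void)
{
        int intact[] = { 3, 6, 12 };    /* intact progression */
        int small[] = { 3, 3, 12, 24 }; /* small-pack rollup */
        int mixed[] = { 3, 3, 7, 27 };  /* small- and large-pack rollup */

        printf("intact: roll up %d packs\n", rollup_count(intact, 3, 2));
        printf("small:  roll up %d packs\n", rollup_count(small, 4, 2));
        printf("mixed:  roll up %d packs\n", rollup_count(mixed, 4, 2));
        return 0;
}

It reports 0, 2 and 3 respectively: nothing to do for the intact progression, the two 3-object packs combined (leaving the three packs asserted just above), and 3 + 3 + 7 combined (leaving the two packs asserted in the small- and large-pack test that follows).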
+ find $objdir/pack -name "*.pack" | sort >after && + test_line_count = 3 after && + comm -3 small before | tr -d "\t" >large && + grep -qFf large after + ) +' + +test_expect_success '--geometric with small- and large-pack rollup' ' + git init geometric && + test_when_finished "rm -fr geometric" && + ( + cd geometric && + + # size(small1) + size(small2) > size(medium) / 2 + test_commit_bulk --start=1 1 && # 3 objects + test_commit_bulk --start=2 1 && # 3 objects + test_commit_bulk --start=2 3 && # 7 objects + test_commit_bulk --start=6 9 && # 27 objects && + + find $objdir/pack -name "*.pack" | sort >before && + + git repack --geometric 2 -d && + + find $objdir/pack -name "*.pack" | sort >after && + comm -12 before after >untouched && + + # Two packs in total; the largest pack from before running "git + # repack", and one new one. + test_line_count = 1 untouched && + test_line_count = 2 after + ) +' + +test_expect_success '--geometric ignores kept packs' ' + git init geometric && + test_when_finished "rm -fr geometric" && + ( + cd geometric && + + test_commit kept && # 3 objects + test_commit pack && # 3 objects + + KEPT=$(git pack-objects --revs $objdir/pack/pack <<-EOF + refs/tags/kept + EOF + ) && + PACK=$(git pack-objects --revs $objdir/pack/pack <<-EOF + refs/tags/pack + ^refs/tags/kept + EOF + ) && + + # neither pack contains more than twice the number of objects in + # the other, so they should be combined. but, marking one as + # .kept on disk will "freeze" it, so the pack structure should + # remain unchanged. + touch $objdir/pack/pack-$KEPT.keep && + + find $objdir/pack -name "*.pack" | sort >before && + git repack --geometric 2 -d && + find $objdir/pack -name "*.pack" | sort >after && + + # both packs should still exist + test_path_is_file $objdir/pack/pack-$KEPT.pack && + test_path_is_file $objdir/pack/pack-$PACK.pack && + + # and no new packs should be created + test_cmp before after && + + # Passing --pack-kept-objects causes packs with a .keep file to + # be repacked, too. 
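The "freeze" relied on here is purely an on-disk naming convention: a pack pack-<hash>.pack counts as kept when a sibling pack-<hash>.keep file exists, which is exactly what the "touch" above creates. A minimal standalone C sketch of that check (an illustration, not git's own code):

#include <stdio.h>
#include <string.h>
#include <unistd.h>

/*
 * Illustration only: a pack ".../pack-<hash>.pack" is considered kept
 * when the sibling ".../pack-<hash>.keep" file exists.
 */
static int pack_is_kept(const char *pack_path)
{
        char keep_path[4096];
        size_t len = strlen(pack_path);

        if (len < 5 || len >= sizeof(keep_path) ||
            strcmp(pack_path + len - 5, ".pack"))
                return 0;
        memcpy(keep_path, pack_path, len - 5);
        strcpy(keep_path + len - 5, ".keep");
        return access(keep_path, F_OK) == 0;
}

int main(int argc, char **argv)
{
        int i;

        for (i = 1; i < argc; i++)
                printf("%s: %s\n", argv[i],
                       pack_is_kept(argv[i]) ? "kept" : "not kept");
        return 0;
}

In the layout this test sets up, pack-$KEPT.pack would report "kept" and pack-$PACK.pack "not kept", which is why the test expects the pack structure to stay unchanged until --pack-kept-objects is passed.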
+ git repack --geometric 2 -d --pack-kept-objects && + + find $objdir/pack -name "*.pack" >after && + test_line_count = 1 after + ) +' + +test_done diff --git a/t/t7810-grep.sh b/t/t7810-grep.sh index edfaa9a6d1..5830733f3d 100755 --- a/t/t7810-grep.sh +++ b/t/t7810-grep.sh @@ -969,7 +969,8 @@ do " done -test_expect_success !PTHREADS 'grep --threads=N or pack.threads=N warns when no pthreads' ' +test_expect_success !PTHREADS,!FAIL_PREREQS \ + 'grep --threads=N or pack.threads=N warns when no pthreads' ' git grep --threads=2 Hello hello_world 2>err && grep ^warning: err >warnings && test_line_count = 1 warnings && diff --git a/t/t9001-send-email.sh b/t/t9001-send-email.sh index 4eee9c3dcb..1a1caf8f2e 100755 --- a/t/t9001-send-email.sh +++ b/t/t9001-send-email.sh @@ -513,6 +513,38 @@ do done +test_expect_success $PREREQ "--validate respects relative core.hooksPath path" ' + clean_fake_sendmail && + mkdir my-hooks && + test_when_finished "rm my-hooks.ran" && + write_script my-hooks/sendemail-validate <<-\EOF && + >my-hooks.ran + exit 1 + EOF + test_config core.hooksPath "my-hooks" && + test_must_fail git send-email \ + --from="Example <nobody@example.com>" \ + --to=nobody@example.com \ + --smtp-server="$(pwd)/fake.sendmail" \ + --validate \ + longline.patch 2>err && + test_path_is_file my-hooks.ran && + grep "rejected by sendemail-validate" err +' + +test_expect_success $PREREQ "--validate respects absolute core.hooksPath path" ' + test_config core.hooksPath "$(pwd)/my-hooks" && + test_when_finished "rm my-hooks.ran" && + test_must_fail git send-email \ + --from="Example <nobody@example.com>" \ + --to=nobody@example.com \ + --smtp-server="$(pwd)/fake.sendmail" \ + --validate \ + longline.patch 2>err && + test_path_is_file my-hooks.ran && + grep "rejected by sendemail-validate" err +' + for enc in 7bit 8bit quoted-printable base64 do test_expect_success $PREREQ "--transfer-encoding=$enc produces correct header" ' diff --git a/transport.c b/transport.c index 1c4ab676d1..eb4b2d4e47 100644 --- a/transport.c +++ b/transport.c @@ -1452,6 +1452,8 @@ int transport_disconnect(struct transport *transport) int ret = 0; if (transport->vtable->disconnect) ret = transport->vtable->disconnect(transport); + if (transport->got_remote_refs) + free_refs((void *)transport->remote_refs); free(transport); return ret; } @@ -11,58 +11,10 @@ const char *tree_type = "tree"; -static int read_one_entry_opt(struct index_state *istate, - const struct object_id *oid, - const char *base, int baselen, - const char *pathname, - unsigned mode, int stage, int opt) -{ - int len; - struct cache_entry *ce; - - if (S_ISDIR(mode)) - return READ_TREE_RECURSIVE; - - len = strlen(pathname); - ce = make_empty_cache_entry(istate, baselen + len); - - ce->ce_mode = create_ce_mode(mode); - ce->ce_flags = create_ce_flags(stage); - ce->ce_namelen = baselen + len; - memcpy(ce->name, base, baselen); - memcpy(ce->name + baselen, pathname, len+1); - oidcpy(&ce->oid, oid); - return add_index_entry(istate, ce, opt); -} - -static int read_one_entry(const struct object_id *oid, struct strbuf *base, - const char *pathname, unsigned mode, int stage, - void *context) -{ - struct index_state *istate = context; - return read_one_entry_opt(istate, oid, base->buf, base->len, pathname, - mode, stage, - ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK); -} - -/* - * This is used when the caller knows there is no existing entries at - * the stage that will conflict with the entry being added. 
- */ -static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base, - const char *pathname, unsigned mode, int stage, - void *context) -{ - struct index_state *istate = context; - return read_one_entry_opt(istate, oid, base->buf, base->len, pathname, - mode, stage, - ADD_CACHE_JUST_APPEND); -} - -static int read_tree_1(struct repository *r, - struct tree *tree, struct strbuf *base, - int stage, const struct pathspec *pathspec, - read_tree_fn_t fn, void *context) +int read_tree_at(struct repository *r, + struct tree *tree, struct strbuf *base, + const struct pathspec *pathspec, + read_tree_fn_t fn, void *context) { struct tree_desc desc; struct name_entry entry; @@ -86,7 +38,7 @@ static int read_tree_1(struct repository *r, } switch (fn(&entry.oid, base, - entry.path, entry.mode, stage, context)) { + entry.path, entry.mode, context)) { case 0: continue; case READ_TREE_RECURSIVE: @@ -119,9 +71,9 @@ static int read_tree_1(struct repository *r, len = tree_entry_len(&entry); strbuf_add(base, entry.path, len); strbuf_addch(base, '/'); - retval = read_tree_1(r, lookup_tree(r, &oid), - base, stage, pathspec, - fn, context); + retval = read_tree_at(r, lookup_tree(r, &oid), + base, pathspec, + fn, context); strbuf_setlen(base, oldlen); if (retval) return -1; @@ -129,17 +81,13 @@ static int read_tree_1(struct repository *r, return 0; } -int read_tree_recursive(struct repository *r, - struct tree *tree, - const char *base, int baselen, - int stage, const struct pathspec *pathspec, - read_tree_fn_t fn, void *context) +int read_tree(struct repository *r, + struct tree *tree, + const struct pathspec *pathspec, + read_tree_fn_t fn, void *context) { struct strbuf sb = STRBUF_INIT; - int ret; - - strbuf_add(&sb, base, baselen); - ret = read_tree_1(r, tree, &sb, stage, pathspec, fn, context); + int ret = read_tree_at(r, tree, &sb, pathspec, fn, context); strbuf_release(&sb); return ret; } @@ -154,47 +102,6 @@ int cmp_cache_name_compare(const void *a_, const void *b_) ce2->name, ce2->ce_namelen, ce_stage(ce2)); } -int read_tree(struct repository *r, struct tree *tree, int stage, - struct pathspec *match, struct index_state *istate) -{ - read_tree_fn_t fn = NULL; - int i, err; - - /* - * Currently the only existing callers of this function all - * call it with stage=1 and after making sure there is nothing - * at that stage; we could always use read_one_entry_quick(). - * - * But when we decide to straighten out git-read-tree not to - * use unpack_trees() in some cases, this will probably start - * to matter. - */ - - /* - * See if we have cache entry at the stage. If so, - * do it the original slow way, otherwise, append and then - * sort at the end. - */ - for (i = 0; !fn && i < istate->cache_nr; i++) { - const struct cache_entry *ce = istate->cache[i]; - if (ce_stage(ce) == stage) - fn = read_one_entry; - } - - if (!fn) - fn = read_one_entry_quick; - err = read_tree_recursive(r, tree, "", 0, stage, match, fn, istate); - if (fn == read_one_entry || err) - return err; - - /* - * Sort the cache entry -- we need to nuke the cache tree, though. 
- */ - cache_tree_free(&istate->cache_tree); - QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare); - return 0; -} - struct tree *lookup_tree(struct repository *r, const struct object_id *oid) { struct object *obj = lookup_object(r, oid); @@ -31,16 +31,16 @@ struct tree *parse_tree_indirect(const struct object_id *oid); int cmp_cache_name_compare(const void *a_, const void *b_); #define READ_TREE_RECURSIVE 1 -typedef int (*read_tree_fn_t)(const struct object_id *, struct strbuf *, const char *, unsigned int, int, void *); +typedef int (*read_tree_fn_t)(const struct object_id *, struct strbuf *, const char *, unsigned int, void *); -int read_tree_recursive(struct repository *r, - struct tree *tree, - const char *base, int baselen, - int stage, const struct pathspec *pathspec, - read_tree_fn_t fn, void *context); +int read_tree_at(struct repository *r, + struct tree *tree, struct strbuf *base, + const struct pathspec *pathspec, + read_tree_fn_t fn, void *context); -int read_tree(struct repository *r, struct tree *tree, - int stage, struct pathspec *pathspec, - struct index_state *istate); +int read_tree(struct repository *r, + struct tree *tree, + const struct pathspec *pathspec, + read_tree_fn_t fn, void *context); #endif /* TREE_H */ diff --git a/unix-socket.c b/unix-socket.c index 19ed48be99..e0be1badb5 100644 --- a/unix-socket.c +++ b/unix-socket.c @@ -1,13 +1,7 @@ #include "cache.h" #include "unix-socket.h" -static int unix_stream_socket(void) -{ - int fd = socket(AF_UNIX, SOCK_STREAM, 0); - if (fd < 0) - die_errno("unable to create socket"); - return fd; -} +#define DEFAULT_UNIX_STREAM_LISTEN_BACKLOG (5) static int chdir_len(const char *orig, int len) { @@ -36,16 +30,23 @@ static void unix_sockaddr_cleanup(struct unix_sockaddr_context *ctx) } static int unix_sockaddr_init(struct sockaddr_un *sa, const char *path, - struct unix_sockaddr_context *ctx) + struct unix_sockaddr_context *ctx, + int disallow_chdir) { int size = strlen(path) + 1; ctx->orig_dir = NULL; if (size > sizeof(sa->sun_path)) { - const char *slash = find_last_dir_sep(path); + const char *slash; const char *dir; struct strbuf cwd = STRBUF_INIT; + if (disallow_chdir) { + errno = ENAMETOOLONG; + return -1; + } + + slash = find_last_dir_sep(path); if (!slash) { errno = ENAMETOOLONG; return -1; @@ -71,15 +72,18 @@ static int unix_sockaddr_init(struct sockaddr_un *sa, const char *path, return 0; } -int unix_stream_connect(const char *path) +int unix_stream_connect(const char *path, int disallow_chdir) { - int fd, saved_errno; + int fd = -1, saved_errno; struct sockaddr_un sa; struct unix_sockaddr_context ctx; - if (unix_sockaddr_init(&sa, path, &ctx) < 0) + if (unix_sockaddr_init(&sa, path, &ctx, disallow_chdir) < 0) return -1; - fd = unix_stream_socket(); + fd = socket(AF_UNIX, SOCK_STREAM, 0); + if (fd < 0) + goto fail; + if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) goto fail; unix_sockaddr_cleanup(&ctx); @@ -87,28 +91,36 @@ int unix_stream_connect(const char *path) fail: saved_errno = errno; + if (fd != -1) + close(fd); unix_sockaddr_cleanup(&ctx); - close(fd); errno = saved_errno; return -1; } -int unix_stream_listen(const char *path) +int unix_stream_listen(const char *path, + const struct unix_stream_listen_opts *opts) { - int fd, saved_errno; + int fd = -1, saved_errno; + int backlog; struct sockaddr_un sa; struct unix_sockaddr_context ctx; unlink(path); - if (unix_sockaddr_init(&sa, path, &ctx) < 0) + if (unix_sockaddr_init(&sa, path, &ctx, opts->disallow_chdir) < 0) return -1; - fd = 
unix_stream_socket(); + fd = socket(AF_UNIX, SOCK_STREAM, 0); + if (fd < 0) + goto fail; if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) goto fail; - if (listen(fd, 5) < 0) + backlog = opts->listen_backlog_size; + if (backlog <= 0) + backlog = DEFAULT_UNIX_STREAM_LISTEN_BACKLOG; + if (listen(fd, backlog) < 0) goto fail; unix_sockaddr_cleanup(&ctx); @@ -116,8 +128,9 @@ int unix_stream_listen(const char *path) fail: saved_errno = errno; + if (fd != -1) + close(fd); unix_sockaddr_cleanup(&ctx); - close(fd); errno = saved_errno; return -1; } diff --git a/unix-socket.h b/unix-socket.h index e271aeec5a..8542cdd799 100644 --- a/unix-socket.h +++ b/unix-socket.h @@ -1,7 +1,15 @@ #ifndef UNIX_SOCKET_H #define UNIX_SOCKET_H -int unix_stream_connect(const char *path); -int unix_stream_listen(const char *path); +struct unix_stream_listen_opts { + int listen_backlog_size; + unsigned int disallow_chdir:1; +}; + +#define UNIX_STREAM_LISTEN_OPTS_INIT { 0 } + +int unix_stream_connect(const char *path, int disallow_chdir); +int unix_stream_listen(const char *path, + const struct unix_stream_listen_opts *opts); #endif /* UNIX_SOCKET_H */ diff --git a/unix-stream-server.c b/unix-stream-server.c new file mode 100644 index 0000000000..efa2a207ab --- /dev/null +++ b/unix-stream-server.c @@ -0,0 +1,125 @@ +#include "cache.h" +#include "lockfile.h" +#include "unix-socket.h" +#include "unix-stream-server.h" + +#define DEFAULT_LOCK_TIMEOUT (100) + +/* + * Try to connect to a unix domain socket at `path` (if it exists) and + * see if there is a server listening. + * + * We don't know if the socket exists, whether a server died and + * failed to cleanup, or whether we have a live server listening, so + * we "poke" it. + * + * We immediately hangup without sending/receiving any data because we + * don't know anything about the protocol spoken and don't want to + * block while writing/reading data. It is sufficient to just know + * that someone is listening. + */ +static int is_another_server_alive(const char *path, + const struct unix_stream_listen_opts *opts) +{ + int fd = unix_stream_connect(path, opts->disallow_chdir); + if (fd >= 0) { + close(fd); + return 1; + } + + return 0; +} + +int unix_ss_create(const char *path, + const struct unix_stream_listen_opts *opts, + long timeout_ms, + struct unix_ss_socket **new_server_socket) +{ + struct lock_file lock = LOCK_INIT; + int fd_socket; + struct unix_ss_socket *server_socket; + + *new_server_socket = NULL; + + if (timeout_ms < 0) + timeout_ms = DEFAULT_LOCK_TIMEOUT; + + /* + * Create a lock at "<path>.lock" if we can. + */ + if (hold_lock_file_for_update_timeout(&lock, path, 0, timeout_ms) < 0) + return -1; + + /* + * If another server is listening on "<path>" give up. We do not + * want to create a socket and steal future connections from them. + */ + if (is_another_server_alive(path, opts)) { + rollback_lock_file(&lock); + errno = EADDRINUSE; + return -2; + } + + /* + * Create and bind to a Unix domain socket at "<path>". 
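The is_another_server_alive() helper earlier in this new file documents the "poke" technique: connect, learn whether anyone answers, and hang up without exchanging any data. Outside of the unix_stream_connect() wrapper this is plain POSIX; a minimal standalone sketch of the same idea (an illustration only, independent of the helpers added here):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

/*
 * Illustration only: "poke" a Unix domain socket by connecting and
 * immediately hanging up.  A successful connect() means some server
 * is listening at that path; no data is sent or received, so we never
 * block on whatever protocol the server speaks.
 */
static int someone_is_listening(const char *path)
{
        struct sockaddr_un sa;
        int fd, connected;

        if (strlen(path) >= sizeof(sa.sun_path))
                return 0;       /* too long for sun_path; treat as not listening */

        fd = socket(AF_UNIX, SOCK_STREAM, 0);
        if (fd < 0)
                return 0;

        memset(&sa, 0, sizeof(sa));
        sa.sun_family = AF_UNIX;
        strcpy(sa.sun_path, path);

        connected = !connect(fd, (struct sockaddr *)&sa, sizeof(sa));
        close(fd);
        return connected;
}

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "/tmp/example.sock";

        printf("%s: %s\n", path,
               someone_is_listening(path) ? "listening" : "no server");
        return 0;
}

The real helpers defer long-path handling to unix_sockaddr_init(), which may temporarily chdir to shorten the path unless disallow_chdir is set; the sketch simply gives up on overlong paths.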
+ */ + fd_socket = unix_stream_listen(path, opts); + if (fd_socket < 0) { + int saved_errno = errno; + rollback_lock_file(&lock); + errno = saved_errno; + return -1; + } + + server_socket = xcalloc(1, sizeof(*server_socket)); + server_socket->path_socket = strdup(path); + server_socket->fd_socket = fd_socket; + lstat(path, &server_socket->st_socket); + + *new_server_socket = server_socket; + + /* + * Always rollback (just delete) "<path>.lock" because we already created + * "<path>" as a socket and do not want to commit_lock to do the atomic + * rename trick. + */ + rollback_lock_file(&lock); + + return 0; +} + +void unix_ss_free(struct unix_ss_socket *server_socket) +{ + if (!server_socket) + return; + + if (server_socket->fd_socket >= 0) { + if (!unix_ss_was_stolen(server_socket)) + unlink(server_socket->path_socket); + close(server_socket->fd_socket); + } + + free(server_socket->path_socket); + free(server_socket); +} + +int unix_ss_was_stolen(struct unix_ss_socket *server_socket) +{ + struct stat st_now; + + if (!server_socket) + return 0; + + if (lstat(server_socket->path_socket, &st_now) == -1) + return 1; + + if (st_now.st_ino != server_socket->st_socket.st_ino) + return 1; + if (st_now.st_dev != server_socket->st_socket.st_dev) + return 1; + + if (!S_ISSOCK(st_now.st_mode)) + return 1; + + return 0; +} diff --git a/unix-stream-server.h b/unix-stream-server.h new file mode 100644 index 0000000000..ae2712ba39 --- /dev/null +++ b/unix-stream-server.h @@ -0,0 +1,33 @@ +#ifndef UNIX_STREAM_SERVER_H +#define UNIX_STREAM_SERVER_H + +#include "unix-socket.h" + +struct unix_ss_socket { + char *path_socket; + struct stat st_socket; + int fd_socket; +}; + +/* + * Create a Unix Domain Socket at the given path under the protection + * of a '.lock' lockfile. + * + * Returns 0 on success, -1 on error, -2 if socket is in use. + */ +int unix_ss_create(const char *path, + const struct unix_stream_listen_opts *opts, + long timeout_ms, + struct unix_ss_socket **server_socket); + +/* + * Close and delete the socket. + */ +void unix_ss_free(struct unix_ss_socket *server_socket); + +/* + * Return 1 if the inode of the pathname to our socket changes. + */ +int unix_ss_was_stolen(struct unix_ss_socket *server_socket); + +#endif /* UNIX_STREAM_SERVER_H */ diff --git a/unpack-trees.c b/unpack-trees.c index 9af8e796b3..8a1afbc1e4 100644 --- a/unpack-trees.c +++ b/unpack-trees.c @@ -16,6 +16,7 @@ #include "fsmonitor.h" #include "object-store.h" #include "promisor-remote.h" +#include "entry.h" /* * Error messages expected by scripts out of plumbing commands such as @@ -2097,7 +2098,7 @@ static int verify_absent_1(const struct cache_entry *ce, if (o->index_only || o->reset || !o->update) return 0; - len = check_leading_path(ce->name, ce_namelen(ce)); + len = check_leading_path(ce->name, ce_namelen(ce), 0); if (!len) return 0; else if (len > 0) { |
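Earlier in this section, t2021 gained "checkout -f must not follow symlinks when removing entries". The primitive underneath that kind of fix is the difference between stat(), which follows a symbolic link, and lstat(), which reports on the link itself. The standalone C sketch below is an illustration of that primitive only, not of git's helpers; it recreates the shape of the t2021 test, where "dir" is a symlink to the real directory "untracked", and code that trusts stat() would happily treat it as a directory to descend into and clean out.

#include <stdio.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

/*
 * Illustration only: "dir" is a symlink to the directory "untracked",
 * as in the t2021 test above.  stat() follows the link and sees a
 * directory; lstat() sees the symlink, which is what a careful
 * removal path must check before deleting anything underneath.
 */
int main(void)
{
        struct stat st;

        if (mkdir("untracked", 0777) && errno != EEXIST)
                return 1;
        (void)unlink("dir");
        if (symlink("untracked", "dir"))
                return 1;

        if (!stat("dir", &st))
                printf("stat:  dir is %s\n",
                       S_ISDIR(st.st_mode) ? "a directory" : "not a directory");
        if (!lstat("dir", &st))
                printf("lstat: dir is %s\n",
                       S_ISLNK(st.st_mode) ? "a symlink" : "not a symlink");
        return 0;
}

Running it prints "stat: dir is a directory" and "lstat: dir is a symlink", which is the distinction behind "not following symbolic links" while deciding what to remove.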