-rw-r--r--  .gitignore | 3
-rw-r--r--  Documentation/RelNotes/1.7.10.txt | 92
-rw-r--r--  Documentation/RelNotes/1.7.9.1.txt | 36
-rw-r--r--  Documentation/git-am.txt | 3
-rw-r--r--  Documentation/git-clone.txt | 16
-rw-r--r--  Documentation/git-merge.txt | 2
-rw-r--r--  Documentation/git-p4.txt | 5
-rw-r--r--  Documentation/git.txt | 3
-rw-r--r--  Documentation/howto/using-signed-tag-in-pull-request.txt | 4
-rw-r--r--  Documentation/merge-options.txt | 17
-rwxr-xr-x  GIT-VERSION-GEN | 2
-rw-r--r--  Makefile | 69
l---------  RelNotes | 2
-rw-r--r--  advice.c | 37
-rw-r--r--  advice.h | 1
-rw-r--r--  builtin/add.c | 1
-rw-r--r--  builtin/branch.c | 15
-rw-r--r--  builtin/checkout.c | 16
-rw-r--r--  builtin/clone.c | 340
-rw-r--r--  builtin/commit.c | 12
-rw-r--r--  builtin/fetch.c | 2
-rw-r--r--  builtin/grep.c | 5
-rw-r--r--  builtin/index-pack.c | 164
-rw-r--r--  builtin/mailinfo.c | 11
-rw-r--r--  builtin/merge.c | 54
-rw-r--r--  builtin/receive-pack.c | 19
-rw-r--r--  builtin/remote.c | 2
-rw-r--r--  builtin/revert.c | 954
-rw-r--r--  builtin/send-pack.c | 13
-rw-r--r--  builtin/tag.c | 72
-rw-r--r--  cache-tree.c | 6
-rw-r--r--  cache.h | 1
-rw-r--r--  commit.c | 13
-rw-r--r--  compat/inet_ntop.c | 6
-rw-r--r--  compat/inet_pton.c | 6
-rw-r--r--  connect.c | 23
-rwxr-xr-x  contrib/completion/git-completion.bash | 71
-rwxr-xr-x  contrib/fast-import/git-p4 | 136
-rw-r--r--  contrib/svn-fe/svn-fe.txt | 8
-rw-r--r--  ctype.c | 2
-rw-r--r--  daemon.c | 6
-rw-r--r--  date.c | 30
-rwxr-xr-x  git-am.sh | 20
-rw-r--r--  git-compat-util.h | 15
-rwxr-xr-x  git-mergetool.sh | 12
-rwxr-xr-x  git-request-pull.sh | 2
-rw-r--r--  git-sh-i18n.sh | 106
-rw-r--r--  git-sh-setup.sh | 2
-rwxr-xr-x  git-submodule.sh | 1
-rw-r--r--  git.c | 2
-rw-r--r--  git.spec.in | 1
-rwxr-xr-x  gitweb/gitweb.perl | 112
-rw-r--r--  gitweb/static/gitweb.css | 7
-rw-r--r--  imap-send.c | 23
-rw-r--r--  log-tree.c | 4
-rw-r--r--  object.c | 9
-rw-r--r--  pack-refs.c | 3
-rw-r--r--  refs.c | 39
-rw-r--r--  refs.h | 6
-rw-r--r--  remote-curl.c | 13
-rw-r--r--  remote.c | 32
-rw-r--r--  revision.c | 55
-rw-r--r--  run-command.c | 69
-rw-r--r--  run-command.h | 2
-rw-r--r--  sequencer.c | 917
-rw-r--r--  sequencer.h | 37
-rw-r--r--  sha1_file.c | 5
-rw-r--r--  t/Makefile | 6
-rw-r--r--  t/README | 13
-rw-r--r--  t/lib-git-daemon.sh | 69
-rwxr-xr-x  t/t0080-vcs-svn.sh | 117
-rwxr-xr-x  t/t0300-credentials.sh | 8
-rwxr-xr-x  t/t2203-add-intent.sh | 8
-rwxr-xr-x  t/t3200-branch.sh | 41
-rwxr-xr-x  t/t3400-rebase.sh | 23
-rwxr-xr-x  t/t4015-diff-whitespace.sh | 14
-rwxr-xr-x  t/t4150-am.sh | 14
-rwxr-xr-x  t/t5150-request-pull.sh | 6
-rwxr-xr-x  t/t5500-fetch-pack.sh | 80
-rwxr-xr-x  t/t5523-push-upstream.sh | 7
-rwxr-xr-x  t/t5541-http-push.sh | 36
-rwxr-xr-x  t/t5570-git-daemon.sh | 148
-rwxr-xr-x  t/t5601-clone.sh | 40
-rwxr-xr-x  t/t5706-clone-branch.sh | 8
-rwxr-xr-x  t/t6012-rev-list-simplify.sh | 1
-rwxr-xr-x  t/t7004-tag.sh | 13
-rwxr-xr-x  t/t7406-submodule-update.sh | 8
-rwxr-xr-x  t/t7600-merge.sh | 25
-rwxr-xr-x  t/t7610-mergetool.sh | 28
-rwxr-xr-x  t/t7810-grep.sh | 22
-rwxr-xr-x  t/t9010-svn-fe.sh | 365
-rwxr-xr-x  t/t9011-svn-da.sh | 248
-rwxr-xr-x  t/t9500-gitweb-standalone-no-errors.sh | 8
-rwxr-xr-x  t/t9801-git-p4-branch.sh | 94
-rwxr-xr-x  t/t9803-git-p4-shell-metachars.sh | 48
-rwxr-xr-x  t/t9804-git-p4-label.sh | 113
-rwxr-xr-x  t/t9806-git-p4-options.sh | 4
-rwxr-xr-x  t/t9809-git-p4-client-view.sh | 395
-rw-r--r--  t/test-lib.sh | 25
-rw-r--r--  tag.c | 12
-rw-r--r--  tag.h | 1
-rw-r--r--  test-obj-pool.c | 116
-rw-r--r--  test-string-pool.c | 31
-rw-r--r--  test-svn-fe.c | 50
-rw-r--r--  test-treap.c | 70
-rw-r--r--  upload-pack.c | 33
-rw-r--r--  vcs-svn/LICENSE | 3
-rw-r--r--  vcs-svn/fast_export.c | 256
-rw-r--r--  vcs-svn/fast_export.h | 26
-rw-r--r--  vcs-svn/line_buffer.c | 6
-rw-r--r--  vcs-svn/line_buffer.h | 2
-rw-r--r--  vcs-svn/obj_pool.h | 61
-rw-r--r--  vcs-svn/repo_tree.c | 330
-rw-r--r--  vcs-svn/repo_tree.h | 12
-rw-r--r--  vcs-svn/sliding_window.c | 79
-rw-r--r--  vcs-svn/sliding_window.h | 18
-rw-r--r--  vcs-svn/string_pool.c | 102
-rw-r--r--  vcs-svn/string_pool.h | 11
-rw-r--r--  vcs-svn/string_pool.txt | 43
-rw-r--r--  vcs-svn/svndiff.c | 308
-rw-r--r--  vcs-svn/svndiff.h | 10
-rw-r--r--  vcs-svn/svndump.c | 134
-rw-r--r--  vcs-svn/trp.h | 237
-rw-r--r--  vcs-svn/trp.txt | 109
-rw-r--r--  xdiff/xemit.c | 12
125 files changed, 4961 insertions, 2920 deletions
diff --git a/.gitignore b/.gitignore
index 3b7680ea1e..87fcc5f6ff 100644
--- a/.gitignore
+++ b/.gitignore
@@ -181,16 +181,13 @@
/test-line-buffer
/test-match-trees
/test-mktemp
-/test-obj-pool
/test-parse-options
/test-path-utils
/test-run-command
/test-sha1
/test-sigchain
-/test-string-pool
/test-subprocess
/test-svn-fe
-/test-treap
/common-cmds.h
*.tar.gz
*.dsc
diff --git a/Documentation/RelNotes/1.7.10.txt b/Documentation/RelNotes/1.7.10.txt
new file mode 100644
index 0000000000..e255901356
--- /dev/null
+++ b/Documentation/RelNotes/1.7.10.txt
@@ -0,0 +1,92 @@
+Git v1.7.10 Release Notes
+=========================
+
+Updates since v1.7.9
+--------------------
+
+UI, Workflows & Features
+
+ * Improved handling of views, labels and branches in git-p4 (in contrib).
+
+ * "vcs-svn"/"svn-fe" learned to read dumps with svn-deltas and
+ support incremental imports.
+
+ * "git am" learned to pass "-b" option to underlying "git mailinfo", so
+ that a bracketed string other than "PATCH" at the beginning can be kept.
+
+ * "git clone" learned "--single-branch" option to limit cloning to a
+ single branch (surprise!).
+
+ * "git clone" learned to detach the HEAD in the resulting repository
+ when the source repository's HEAD does not point to a branch.
+
+ * When showing a patch while ignoring whitespace changes, the context
+ lines are taken from the postimage, in order to make it easier to
+ view the output.
+
+ * "git merge" in an interactive session learned to spawn the editor
+ by default to let the user edit the auto-generated merge message,
+ to encourage people to explain their merges better. Legacy scripts
+ can export MERGE_AUTOEDIT=no to retain the historical behaviour.
+
+ * "gitweb" allows intermediate entries in the directory hierarchy
+ that lead to a project to be clicked, which in turn shows the
+ list of projects inside that directory.
+
+Performance
+
+ * During "git upload-pack" in respose to "git fetch", unnecessary calls
+ to parse_object() have been eliminated, to help performance in
+ repositories with excessive number of refs.
+
+Internal Implementation
+
+ * Recursive call chains in "git index-pack" to deal with long delta
+ chains have been flattened, to reduce the stack footprint.
+
+ * Use of add_extra_ref() API is slowly getting removed, to make it
+ possible to cleanly restructure the overall refs API.
+
+ * The test suite supports the new "test_pause" helper function.
+
+ * t/Makefile is adjusted to prevent newer versions of GNU make from
+ running tests in seemingly random order.
+
+Also contains minor documentation updates and code clean-ups.
+
+
+Fixes since v1.7.9
+------------------
+
+Unless otherwise noted, all the fixes since v1.7.9 in the maintenance
+releases are contained in this release (see their release notes for
+details).
+
+ * "add -e" learned not to show a diff for an otherwise unmodified
+ submodule that only has uncommitted local changes in the patch
+ prepared for the user to edit.
+ (merge 701825d js/add-e-submodule-fix later to maint).
+
+ * "rebase" and "commit --amend" failed to work on commits with ancient
+ timestamps near year 1970.
+ (merge 2c733fb jc/parse-date-raw later to maint).
+
+ * "git merge --ff-only $tag" failed because it cannot record the
+ required mergetag without creating a merge; but this is such a common
+ operation for a branch that is used _only_ to follow the upstream that
+ it is now allowed to fast-forward without recording the mergetag.
+ (merge b5c9f1c jc/merge-ff-only-stronger-than-signed-merge later to maint).
+
+ * Typo in "git branch --edit-description my-tpoic" was not diagnosed.
+ (merge c2d17ba jc/branch-desc-typoavoidance later to maint).
+
+ * rpmbuild noticed an unpackaged but installed *.mo file and failed.
+ (merge 3a9f58c jn/rpm-spec later to maint).
+
+---
+exec >/var/tmp/1
+O=v1.7.9-208-gee8d52f
+echo O=$(git describe)
+git log --first-parent --oneline ^maint $O..
+echo
+git shortlog --no-merges ^maint $O..
diff --git a/Documentation/RelNotes/1.7.9.1.txt b/Documentation/RelNotes/1.7.9.1.txt
new file mode 100644
index 0000000000..a6c15e5d52
--- /dev/null
+++ b/Documentation/RelNotes/1.7.9.1.txt
@@ -0,0 +1,36 @@
+Git v1.7.9.1 Release Notes
+==========================
+
+Fixes since v1.7.9
+------------------
+
+ * Subprocesses spawned from various git programs were often left running
+ to completion even when the top-level process was killed.
+
+ * Using "git grep -l/-L" together with options -W or --break may not
+ make much sense, as the output is only a list of names or a count of
+ hits and there is no place for file breaks, but the latter options
+ made "-l/-L" miscount the hits.
+
+ * "git log --first-parent $pathspec" did not stay on the first parent
+ chain and veered into a side branch from which the whole change to the
+ specified paths came.
+
+ * "git push -q" was not sufficiently quiet.
+
+ * When "git push" fails to update any refs, the client side did not
+ report an error correctly to the end user.
+
+ * "git mergetool" now gives an empty file as the common base version
+ to the backend when dealing with the "both sides added, differently"
+ case.
+
+ * When asking for a tag to be pulled, "request-pull" did not show the
+ name of the tag prefixed with "tags/", which would have helped older
+ clients.
+
+ * "git submodule add $path" forgot to recompute the name to be stored
+ in .gitmodules when the submodule at $path was once added to the
+ superproject and already initialized.
+
+Also contains minor fixes and documentation updates.
diff --git a/Documentation/git-am.txt b/Documentation/git-am.txt
index 887466d777..ee6cca2e13 100644
--- a/Documentation/git-am.txt
+++ b/Documentation/git-am.txt
@@ -40,6 +40,9 @@ OPTIONS
--keep::
Pass `-k` flag to 'git mailinfo' (see linkgit:git-mailinfo[1]).
+--keep-non-patch::
+ Pass `-b` flag to 'git mailinfo' (see linkgit:git-mailinfo[1]).
+
--keep-cr::
--no-keep-cr::
With `--keep-cr`, call 'git mailsplit' (see linkgit:git-mailsplit[1])
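
As an illustration of the new `--keep-non-patch` option documented above (not part of the patch itself; the patch file name and subject line are hypothetical), a bracketed prefix other than "[PATCH]" now survives into the commit title:

------------
$ git am --keep-non-patch 0001-frotz-tighten-error-handling.patch
$ git log -1 --format=%s
[RFC] frotz: tighten error handling
------------

Without the option, `git am` would have stripped the "[RFC]" prefix just like "[PATCH]".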
diff --git a/Documentation/git-clone.txt b/Documentation/git-clone.txt
index 4b8b26b75e..6e22522c4f 100644
--- a/Documentation/git-clone.txt
+++ b/Documentation/git-clone.txt
@@ -13,7 +13,8 @@ SYNOPSIS
[-l] [-s] [--no-hardlinks] [-q] [-n] [--bare] [--mirror]
[-o <name>] [-b <name>] [-u <upload-pack>] [--reference <repository>]
[--separate-git-dir <git dir>]
- [--depth <depth>] [--recursive|--recurse-submodules] [--] <repository>
+ [--depth <depth>] [--[no-]single-branch]
+ [--recursive|--recurse-submodules] [--] <repository>
[<directory>]
DESCRIPTION
@@ -146,8 +147,9 @@ objects from the source repository into a pack in the cloned repository.
-b <name>::
Instead of pointing the newly created HEAD to the branch pointed
to by the cloned repository's HEAD, point to `<name>` branch
- instead. In a non-bare repository, this is the branch that will
- be checked out.
+ instead. `--branch` can also take tags and treat them like
+ detached HEAD. In a non-bare repository, this is the branch
+ that will be checked out.
--upload-pack <upload-pack>::
-u <upload-pack>::
@@ -179,6 +181,14 @@ objects from the source repository into a pack in the cloned repository.
with a long history, and would want to send in fixes
as patches.
+--single-branch::
+ Clone only the history leading to the tip of a single branch,
+ either specified by the `--branch` option or the primary
+ branch that the remote's `HEAD` points at. When creating a shallow
+ clone with the `--depth` option, this is the default, unless
+ `--no-single-branch` is given to fetch the histories near the
+ tips of all branches.
+
--recursive::
--recurse-submodules::
After the clone is created, initialize all submodules within,
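
A usage sketch for the `--single-branch` and `--depth` interaction described above (the repository URL and directory names are hypothetical):

------------
$ git clone --single-branch -b maint git://example.com/frotz.git
$ git clone --depth 1 git://example.com/frotz.git shallow        # --depth implies --single-branch
$ git clone --depth 1 --no-single-branch git://example.com/frotz.git all-tips
------------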
diff --git a/Documentation/git-merge.txt b/Documentation/git-merge.txt
index e2e6aba17e..3ceefb8a1f 100644
--- a/Documentation/git-merge.txt
+++ b/Documentation/git-merge.txt
@@ -9,7 +9,7 @@ git-merge - Join two or more development histories together
SYNOPSIS
--------
[verse]
-'git merge' [-n] [--stat] [--no-commit] [--squash]
+'git merge' [-n] [--stat] [--no-commit] [--squash] [--[no-]edit]
[-s <strategy>] [-X <strategy-option>]
[--[no-]rerere-autoupdate] [-m <msg>] [<commit>...]
'git merge' <msg> HEAD <commit>...
diff --git a/Documentation/git-p4.txt b/Documentation/git-p4.txt
index 78938b2930..8b92cc0f8d 100644
--- a/Documentation/git-p4.txt
+++ b/Documentation/git-p4.txt
@@ -314,6 +314,11 @@ around whitespace. Of the possible wildcards, git-p4 only handles
'...', and only when it is at the end of the path. Git-p4 will complain
if it encounters an unhandled wildcard.
+Bugs in the implementation of overlap mappings exist. If multiple depot
+paths map through overlays to the same location in the repository,
+git-p4 can choose the wrong one. This is hard to solve without
+dedicating a client spec just for git-p4.
+
The name of the client can be given to git-p4 in multiple ways. The
variable 'git-p4.client' takes precedence if it exists. Otherwise,
normal p4 mechanisms of determining the client are used: environment
diff --git a/Documentation/git.txt b/Documentation/git.txt
index c991430642..f7e201fae3 100644
--- a/Documentation/git.txt
+++ b/Documentation/git.txt
@@ -69,9 +69,10 @@ Documentation for older releases are available here:
link:RelNotes/1.7.7.1.txt[1.7.7.1],
link:RelNotes/1.7.7.txt[1.7.7].
-* link:v1.7.6.5/git.html[documentation for release 1.7.6.5]
+* link:v1.7.6.6/git.html[documentation for release 1.7.6.6]
* release notes for
+ link:RelNotes/1.7.6.6.txt[1.7.6.6],
link:RelNotes/1.7.6.5.txt[1.7.6.5],
link:RelNotes/1.7.6.4.txt[1.7.6.4],
link:RelNotes/1.7.6.3.txt[1.7.6.3],
diff --git a/Documentation/howto/using-signed-tag-in-pull-request.txt b/Documentation/howto/using-signed-tag-in-pull-request.txt
index a1351c5bb8..98c0033a55 100644
--- a/Documentation/howto/using-signed-tag-in-pull-request.txt
+++ b/Documentation/howto/using-signed-tag-in-pull-request.txt
@@ -109,7 +109,7 @@ The resulting msg.txt file begins like so:
are available in the git repository at:
- example.com:/git/froboz.git frotz-for-xyzzy
+ example.com:/git/froboz.git tags/frotz-for-xyzzy
for you to fetch changes up to 703f05ad5835c...:
@@ -141,7 +141,7 @@ After receiving such a pull request message, the integrator fetches and
integrates the tag named in the request, with:
------------
- $ git pull example.com:/git/froboz.git/ frotz-for-xyzzy
+ $ git pull example.com:/git/froboz.git/ tags/frotz-for-xyzzy
------------
This operation will always open an editor to allow the integrator to fine
diff --git a/Documentation/merge-options.txt b/Documentation/merge-options.txt
index 1a5c12e317..f2f1d0f51c 100644
--- a/Documentation/merge-options.txt
+++ b/Documentation/merge-options.txt
@@ -8,9 +8,20 @@ failed and do not autocommit, to give the user a chance to
inspect and further tweak the merge result before committing.
--edit::
--e::
- Invoke editor before committing successful merge to further
- edit the default merge message.
+--no-edit::
+ Invoke an editor before committing a successful mechanical merge to
+ further edit the auto-generated merge message, so that the user
+ can explain and justify the merge. The `--no-edit` option can be
+ used to accept the auto-generated message (this is generally
+ discouraged). The `--edit` option is still useful if you are
+ giving a draft message with the `-m` option from the command line
+ and want to edit it in the editor.
++
+Older scripts may depend on the historical behaviour of not allowing the
+user to edit the merge log message. They will see an editor opened when
+they run `git merge`. To make it easier to adjust such scripts to the
+updated behaviour, the environment variable `GIT_MERGE_AUTOEDIT` can be
+set to `no` at the beginning of them.
--ff::
--no-ff::
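
For scripts that rely on the historical non-interactive behaviour, the escape hatch described above can be set once near the top of the script; a minimal sketch (the branch name is hypothetical):

------------
#!/bin/sh
# keep the pre-1.7.10 behaviour: do not open an editor for merge messages
GIT_MERGE_AUTOEDIT=no
export GIT_MERGE_AUTOEDIT

git merge --no-ff topic
------------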
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index 70204f87f9..c25fd2a374 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,7 +1,7 @@
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v1.7.9
+DEF_VER=v1.7.9.GIT
LF='
'
diff --git a/Makefile b/Makefile
index a782409306..c92c2ba24d 100644
--- a/Makefile
+++ b/Makefile
@@ -47,6 +47,9 @@ all::
# A translated Git requires GNU libintl or another gettext implementation,
# plus libintl-perl at runtime.
#
+# Define USE_GETTEXT_SCHEME and set it to 'fallthrough', if you don't trust
+# the installed gettext translation of the shell scripts output.
+#
# Define HAVE_LIBCHARSET_H if you haven't set NO_GETTEXT and you can't
# trust the langinfo.h's nl_langinfo(CODESET) function to return the
# current character set. GNU and Solaris have a nl_langinfo(CODESET),
@@ -374,6 +377,11 @@ BUILTIN_OBJS =
BUILT_INS =
COMPAT_CFLAGS =
COMPAT_OBJS =
+XDIFF_H =
+XDIFF_OBJS =
+VCSSVN_H =
+VCSSVN_OBJS =
+VCSSVN_TEST_OBJS =
EXTRA_CPPFLAGS =
LIB_H =
LIB_OBJS =
@@ -452,6 +460,9 @@ PROGRAM_OBJS += http-backend.o
PROGRAM_OBJS += sh-i18n--envsubst.o
PROGRAM_OBJS += credential-store.o
+# Binary suffix, set to .exe for Windows builds
+X =
+
PROGRAMS += $(patsubst %.o,git-%$X,$(PROGRAM_OBJS))
TEST_PROGRAMS_NEED_X += test-chmtime
@@ -466,16 +477,13 @@ TEST_PROGRAMS_NEED_X += test-index-version
TEST_PROGRAMS_NEED_X += test-line-buffer
TEST_PROGRAMS_NEED_X += test-match-trees
TEST_PROGRAMS_NEED_X += test-mktemp
-TEST_PROGRAMS_NEED_X += test-obj-pool
TEST_PROGRAMS_NEED_X += test-parse-options
TEST_PROGRAMS_NEED_X += test-path-utils
TEST_PROGRAMS_NEED_X += test-run-command
TEST_PROGRAMS_NEED_X += test-sha1
TEST_PROGRAMS_NEED_X += test-sigchain
-TEST_PROGRAMS_NEED_X += test-string-pool
TEST_PROGRAMS_NEED_X += test-subprocess
TEST_PROGRAMS_NEED_X += test-svn-fe
-TEST_PROGRAMS_NEED_X += test-treap
TEST_PROGRAMS = $(patsubst %,%$X,$(TEST_PROGRAMS_NEED_X))
@@ -1521,6 +1529,7 @@ ifdef GETTEXT_POISON
endif
ifdef NO_GETTEXT
BASIC_CFLAGS += -DNO_GETTEXT
+ USE_GETTEXT_SCHEME ?= fallthrough
endif
ifdef NO_STRCASESTR
COMPAT_CFLAGS += -DNO_STRCASESTR
@@ -1887,6 +1896,7 @@ sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \
-e 's/@@GIT_VERSION@@/$(GIT_VERSION)/g' \
-e 's|@@LOCALEDIR@@|$(localedir_SQ)|g' \
-e 's/@@NO_CURL@@/$(NO_CURL)/g' \
+ -e 's/@@USE_GETTEXT_SCHEME@@/$(USE_GETTEXT_SCHEME)/g' \
-e $(BROKEN_PATH_FIX) \
$@.sh >$@+
endef
@@ -1988,12 +1998,24 @@ GIT_OBJS := $(LIB_OBJS) $(BUILTIN_OBJS) $(PROGRAM_OBJS) $(TEST_OBJS) \
ifndef NO_CURL
GIT_OBJS += http.o http-walker.o remote-curl.o
endif
-XDIFF_OBJS = xdiff/xdiffi.o xdiff/xprepare.o xdiff/xutils.o xdiff/xemit.o \
- xdiff/xmerge.o xdiff/xpatience.o xdiff/xhistogram.o
-VCSSVN_OBJS = vcs-svn/string_pool.o vcs-svn/line_buffer.o \
- vcs-svn/repo_tree.o vcs-svn/fast_export.o vcs-svn/svndump.o
-VCSSVN_TEST_OBJS = test-obj-pool.o test-string-pool.o \
- test-line-buffer.o test-treap.o
+
+XDIFF_OBJS += xdiff/xdiffi.o
+XDIFF_OBJS += xdiff/xprepare.o
+XDIFF_OBJS += xdiff/xutils.o
+XDIFF_OBJS += xdiff/xemit.o
+XDIFF_OBJS += xdiff/xmerge.o
+XDIFF_OBJS += xdiff/xpatience.o
+XDIFF_OBJS += xdiff/xhistogram.o
+
+VCSSVN_OBJS += vcs-svn/line_buffer.o
+VCSSVN_OBJS += vcs-svn/sliding_window.o
+VCSSVN_OBJS += vcs-svn/repo_tree.o
+VCSSVN_OBJS += vcs-svn/fast_export.o
+VCSSVN_OBJS += vcs-svn/svndiff.o
+VCSSVN_OBJS += vcs-svn/svndump.o
+
+VCSSVN_TEST_OBJS += test-line-buffer.o
+
OBJECTS := $(GIT_OBJS) $(XDIFF_OBJS) $(VCSSVN_OBJS)
dep_files := $(foreach f,$(OBJECTS),$(dir $f).depend/$(notdir $f).d)
@@ -2112,16 +2134,25 @@ connect.o transport.o url.o http-backend.o: url.h
http-fetch.o http-walker.o remote-curl.o transport.o walker.o: walker.h
http.o http-walker.o http-push.o http-fetch.o remote-curl.o: http.h url.h
-xdiff-interface.o $(XDIFF_OBJS): \
- xdiff/xinclude.h xdiff/xmacros.h xdiff/xdiff.h xdiff/xtypes.h \
- xdiff/xutils.h xdiff/xprepare.h xdiff/xdiffi.h xdiff/xemit.h
+XDIFF_H += xdiff/xinclude.h
+XDIFF_H += xdiff/xmacros.h
+XDIFF_H += xdiff/xdiff.h
+XDIFF_H += xdiff/xtypes.h
+XDIFF_H += xdiff/xutils.h
+XDIFF_H += xdiff/xprepare.h
+XDIFF_H += xdiff/xdiffi.h
+XDIFF_H += xdiff/xemit.h
-$(VCSSVN_OBJS) $(VCSSVN_TEST_OBJS): $(LIB_H) \
- vcs-svn/obj_pool.h vcs-svn/trp.h vcs-svn/string_pool.h \
- vcs-svn/line_buffer.h vcs-svn/repo_tree.h vcs-svn/fast_export.h \
- vcs-svn/svndump.h
+xdiff-interface.o $(XDIFF_OBJS): $(XDIFF_H)
-test-svn-fe.o: vcs-svn/svndump.h
+VCSSVN_H += vcs-svn/line_buffer.h
+VCSSVN_H += vcs-svn/sliding_window.h
+VCSSVN_H += vcs-svn/repo_tree.h
+VCSSVN_H += vcs-svn/fast_export.h
+VCSSVN_H += vcs-svn/svndiff.h
+VCSSVN_H += vcs-svn/svndump.h
+
+$(VCSSVN_OBJS) $(VCSSVN_TEST_OBJS): $(LIB_H) $(VCSSVN_H)
endif
exec_cmd.sp exec_cmd.s exec_cmd.o: EXTRA_CPPFLAGS = \
@@ -2264,7 +2295,7 @@ cscope:
### Detect prefix changes
TRACK_CFLAGS = $(CC):$(subst ','\'',$(ALL_CFLAGS)):\
$(bindir_SQ):$(gitexecdir_SQ):$(template_dir_SQ):$(prefix_SQ):\
- $(localedir_SQ)
+ $(localedir_SQ):$(USE_GETTEXT_SCHEME)
GIT-CFLAGS: FORCE
@FLAGS='$(TRACK_CFLAGS)'; \
@@ -2349,8 +2380,6 @@ test-line-buffer$X: vcs-svn/lib.a
test-parse-options$X: parse-options.o parse-options-cb.o
-test-string-pool$X: vcs-svn/lib.a
-
test-svn-fe$X: vcs-svn/lib.a
.PRECIOUS: $(TEST_OBJS)
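
The new USE_GETTEXT_SCHEME knob introduced above can be given on the make command line or recorded in config.mak; an illustrative build invocation:

------------
$ make USE_GETTEXT_SCHEME=fallthrough
$ echo 'USE_GETTEXT_SCHEME = fallthrough' >>config.mak    # or make it permanent
------------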
diff --git a/RelNotes b/RelNotes
index 766bbaf8f5..2c2a169555 120000
--- a/RelNotes
+++ b/RelNotes
@@ -1 +1 @@
-Documentation/RelNotes/1.7.9.txt \ No newline at end of file
+Documentation/RelNotes/1.7.10.txt \ No newline at end of file
diff --git a/advice.c b/advice.c
index e02e632df3..01130e54e7 100644
--- a/advice.c
+++ b/advice.c
@@ -21,11 +21,21 @@ static struct {
void advise(const char *advice, ...)
{
+ struct strbuf buf = STRBUF_INIT;
va_list params;
+ const char *cp, *np;
va_start(params, advice);
- vreportf("hint: ", advice, params);
+ strbuf_vaddf(&buf, advice, params);
va_end(params);
+
+ for (cp = buf.buf; *cp; cp = np) {
+ np = strchrnul(cp, '\n');
+ fprintf(stderr, _("hint: %.*s\n"), (int)(np - cp), cp);
+ if (*np)
+ np++;
+ }
+ strbuf_release(&buf);
}
int git_default_advice_config(const char *var, const char *value)
@@ -46,16 +56,15 @@ int git_default_advice_config(const char *var, const char *value)
int error_resolve_conflict(const char *me)
{
error("'%s' is not possible because you have unmerged files.", me);
- if (advice_resolve_conflict) {
+ if (advice_resolve_conflict)
/*
* Message used both when 'git commit' fails and when
* other commands doing a merge do.
*/
- advise("Fix them up in the work tree,");
- advise("and then use 'git add/rm <file>' as");
- advise("appropriate to mark resolution and make a commit,");
- advise("or use 'git commit -a'.");
- }
+ advise(_("Fix them up in the work tree,\n"
+ "and then use 'git add/rm <file>' as\n"
+ "appropriate to mark resolution and make a commit,\n"
+ "or use 'git commit -a'."));
return -1;
}
@@ -64,3 +73,17 @@ void NORETURN die_resolve_conflict(const char *me)
error_resolve_conflict(me);
die("Exiting because of an unresolved conflict.");
}
+
+void detach_advice(const char *new_name)
+{
+ const char fmt[] =
+ "Note: checking out '%s'.\n\n"
+ "You are in 'detached HEAD' state. You can look around, make experimental\n"
+ "changes and commit them, and you can discard any commits you make in this\n"
+ "state without impacting any branches by performing another checkout.\n\n"
+ "If you want to create a new branch to retain commits you create, you may\n"
+ "do so (now or later) by using -b with the checkout command again. Example:\n\n"
+ " git checkout -b new_branch_name\n\n";
+
+ fprintf(stderr, fmt, new_name);
+}
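
With advise() now splitting its formatted message on newlines, each line of a multi-line advice string gets its own "hint:" prefix; the unmerged-files advice above would render roughly as follows (an illustration of the output format, not a captured transcript):

------------
error: 'commit' is not possible because you have unmerged files.
hint: Fix them up in the work tree,
hint: and then use 'git add/rm <file>' as
hint: appropriate to mark resolution and make a commit,
hint: or use 'git commit -a'.
------------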
diff --git a/advice.h b/advice.h
index e5d0af782b..7bda45b83e 100644
--- a/advice.h
+++ b/advice.h
@@ -14,5 +14,6 @@ int git_default_advice_config(const char *var, const char *value);
void advise(const char *advice, ...);
int error_resolve_conflict(const char *me);
extern void NORETURN die_resolve_conflict(const char *me);
+void detach_advice(const char *new_name);
#endif /* ADVICE_H */
diff --git a/builtin/add.c b/builtin/add.c
index 1c42900ff8..b79336d712 100644
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -280,6 +280,7 @@ static int edit_patch(int argc, const char **argv, const char *prefix)
argc = setup_revisions(argc, argv, &rev, NULL);
rev.diffopt.output_format = DIFF_FORMAT_PATCH;
+ DIFF_OPT_SET(&rev.diffopt, IGNORE_DIRTY_SUBMODULES);
out = open(file, O_CREAT | O_WRONLY, 0644);
if (out < 0)
die (_("Could not open '%s' for writing."), file);
diff --git a/builtin/branch.c b/builtin/branch.c
index 7095718c13..cb17bc3675 100644
--- a/builtin/branch.c
+++ b/builtin/branch.c
@@ -768,6 +768,8 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
with_commit, argv);
else if (edit_description) {
const char *branch_name;
+ struct strbuf branch_ref = STRBUF_INIT;
+
if (detached)
die("Cannot give description to detached HEAD");
if (!argc)
@@ -776,6 +778,19 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
branch_name = argv[0];
else
usage_with_options(builtin_branch_usage, options);
+
+ strbuf_addf(&branch_ref, "refs/heads/%s", branch_name);
+ if (!ref_exists(branch_ref.buf)) {
+ strbuf_release(&branch_ref);
+
+ if (!argc)
+ return error("No commit on branch '%s' yet.",
+ branch_name);
+ else
+ return error("No such branch '%s'.", branch_name);
+ }
+ strbuf_release(&branch_ref);
+
if (edit_branch_description(branch_name))
return 1;
} else if (rename) {
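
The ref_exists() check added above turns a typo into an immediate error instead of silently editing a description for a branch that does not exist; a sample session (branch names are hypothetical):

------------
$ git branch --edit-description my-tpoic
error: No such branch 'my-tpoic'.
$ git checkout --orphan fresh-start
Switched to a new branch 'fresh-start'
$ git branch --edit-description
error: No commit on branch 'fresh-start' yet.
------------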
diff --git a/builtin/checkout.c b/builtin/checkout.c
index f1984d9933..5bf96ba4d4 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -514,20 +514,6 @@ static void report_tracking(struct branch_info *new)
strbuf_release(&sb);
}
-static void detach_advice(const char *old_path, const char *new_name)
-{
- const char fmt[] =
- "Note: checking out '%s'.\n\n"
- "You are in 'detached HEAD' state. You can look around, make experimental\n"
- "changes and commit them, and you can discard any commits you make in this\n"
- "state without impacting any branches by performing another checkout.\n\n"
- "If you want to create a new branch to retain commits you create, you may\n"
- "do so (now or later) by using -b with the checkout command again. Example:\n\n"
- " git checkout -b new_branch_name\n\n";
-
- fprintf(stderr, fmt, new_name);
-}
-
static void update_refs_for_switch(struct checkout_opts *opts,
struct branch_info *old,
struct branch_info *new)
@@ -575,7 +561,7 @@ static void update_refs_for_switch(struct checkout_opts *opts,
REF_NODEREF, DIE_ON_ERR);
if (!opts->quiet) {
if (old->path && advice_detached_head)
- detach_advice(old->path, new->name);
+ detach_advice(new->name);
describe_detached_head(_("HEAD is now at"), new->commit);
}
} else if (new->path) { /* Switch branches. */
diff --git a/builtin/clone.c b/builtin/clone.c
index 86db954730..c62d4b5737 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -37,7 +37,7 @@ static const char * const builtin_clone_usage[] = {
NULL
};
-static int option_no_checkout, option_bare, option_mirror;
+static int option_no_checkout, option_bare, option_mirror, option_single_branch = -1;
static int option_local, option_no_hardlinks, option_shared, option_recursive;
static char *option_template, *option_depth;
static char *option_origin = NULL;
@@ -92,6 +92,8 @@ static struct option builtin_clone_options[] = {
"path to git-upload-pack on the remote"),
OPT_STRING(0, "depth", &option_depth, "depth",
"create a shallow clone of that depth"),
+ OPT_BOOL(0, "single-branch", &option_single_branch,
+ "clone only one branch, HEAD or --branch"),
OPT_STRING(0, "separate-git-dir", &real_git_dir, "gitdir",
"separate git dir from working tree"),
OPT_STRING_LIST('c', "config", &option_config, "key=value",
@@ -361,13 +363,8 @@ static void copy_or_link_directory(struct strbuf *src, struct strbuf *dest,
closedir(dir);
}
-static const struct ref *clone_local(const char *src_repo,
- const char *dest_repo)
+static void clone_local(const char *src_repo, const char *dest_repo)
{
- const struct ref *ret;
- struct remote *remote;
- struct transport *transport;
-
if (option_shared) {
struct strbuf alt = STRBUF_INIT;
strbuf_addf(&alt, "%s/objects", src_repo);
@@ -383,13 +380,8 @@ static const struct ref *clone_local(const char *src_repo,
strbuf_release(&dest);
}
- remote = remote_get(src_repo);
- transport = transport_get(remote, src_repo);
- ret = transport_get_remote_refs(transport);
- transport_disconnect(transport);
if (0 <= option_verbosity)
printf(_("done.\n"));
- return ret;
}
static const char *junk_work_tree;
@@ -420,6 +412,26 @@ static void remove_junk_on_signal(int signo)
raise(signo);
}
+static struct ref *find_remote_branch(const struct ref *refs, const char *branch)
+{
+ struct ref *ref;
+ struct strbuf head = STRBUF_INIT;
+ strbuf_addstr(&head, "refs/heads/");
+ strbuf_addstr(&head, branch);
+ ref = find_ref_by_name(refs, head.buf);
+ strbuf_release(&head);
+
+ if (ref)
+ return ref;
+
+ strbuf_addstr(&head, "refs/tags/");
+ strbuf_addstr(&head, branch);
+ ref = find_ref_by_name(refs, head.buf);
+ strbuf_release(&head);
+
+ return ref;
+}
+
static struct ref *wanted_peer_refs(const struct ref *refs,
struct refspec *refspec)
{
@@ -427,8 +439,27 @@ static struct ref *wanted_peer_refs(const struct ref *refs,
struct ref *local_refs = head;
struct ref **tail = head ? &head->next : &local_refs;
- get_fetch_map(refs, refspec, &tail, 0);
- if (!option_mirror)
+ if (option_single_branch) {
+ struct ref *remote_head = NULL;
+
+ if (!option_branch)
+ remote_head = guess_remote_head(head, refs, 0);
+ else
+ remote_head = find_remote_branch(refs, option_branch);
+
+ if (!remote_head && option_branch)
+ warning(_("Could not find remote branch %s to clone."),
+ option_branch);
+ else {
+ get_fetch_map(remote_head, refspec, &tail, 0);
+
+ /* if --branch=tag, pull the requested tag explicitly */
+ get_fetch_map(remote_head, tag_refspec, &tail, 0);
+ }
+ } else
+ get_fetch_map(refs, refspec, &tail, 0);
+
+ if (!option_mirror && !option_single_branch)
get_fetch_map(refs, tag_refspec, &tail, 0);
return local_refs;
@@ -441,11 +472,135 @@ static void write_remote_refs(const struct ref *local_refs)
for (r = local_refs; r; r = r->next) {
if (!r->peer_ref)
continue;
- add_extra_ref(r->peer_ref->name, r->old_sha1, 0);
+ add_packed_ref(r->peer_ref->name, r->old_sha1);
}
pack_refs(PACK_REFS_ALL);
- clear_extra_refs();
+}
+
+static void write_followtags(const struct ref *refs, const char *msg)
+{
+ const struct ref *ref;
+ for (ref = refs; ref; ref = ref->next) {
+ if (prefixcmp(ref->name, "refs/tags/"))
+ continue;
+ if (!suffixcmp(ref->name, "^{}"))
+ continue;
+ if (!has_sha1_file(ref->old_sha1))
+ continue;
+ update_ref(msg, ref->name, ref->old_sha1,
+ NULL, 0, DIE_ON_ERR);
+ }
+}
+
+static void update_remote_refs(const struct ref *refs,
+ const struct ref *mapped_refs,
+ const struct ref *remote_head_points_at,
+ const char *branch_top,
+ const char *msg)
+{
+ if (refs) {
+ clear_extra_refs();
+ write_remote_refs(mapped_refs);
+ if (option_single_branch)
+ write_followtags(refs, msg);
+ }
+
+ if (remote_head_points_at && !option_bare) {
+ struct strbuf head_ref = STRBUF_INIT;
+ strbuf_addstr(&head_ref, branch_top);
+ strbuf_addstr(&head_ref, "HEAD");
+ create_symref(head_ref.buf,
+ remote_head_points_at->peer_ref->name,
+ msg);
+ }
+}
+
+static void update_head(const struct ref *our, const struct ref *remote,
+ const char *msg)
+{
+ if (our && !prefixcmp(our->name, "refs/heads/")) {
+ /* Local default branch link */
+ create_symref("HEAD", our->name, NULL);
+ if (!option_bare) {
+ const char *head = skip_prefix(our->name, "refs/heads/");
+ update_ref(msg, "HEAD", our->old_sha1, NULL, 0, DIE_ON_ERR);
+ install_branch_config(0, head, option_origin, our->name);
+ }
+ } else if (our) {
+ struct commit *c = lookup_commit_reference(our->old_sha1);
+ /* --branch specifies a non-branch (i.e. tags), detach HEAD */
+ update_ref(msg, "HEAD", c->object.sha1,
+ NULL, REF_NODEREF, DIE_ON_ERR);
+ } else if (remote) {
+ /*
+ * We know remote HEAD points to a non-branch, or
+ * HEAD points to a branch but we don't know which one.
+ * Detach HEAD in all these cases.
+ */
+ update_ref(msg, "HEAD", remote->old_sha1,
+ NULL, REF_NODEREF, DIE_ON_ERR);
+ }
+}
+
+static int checkout(void)
+{
+ unsigned char sha1[20];
+ char *head;
+ struct lock_file *lock_file;
+ struct unpack_trees_options opts;
+ struct tree *tree;
+ struct tree_desc t;
+ int err = 0, fd;
+
+ if (option_no_checkout)
+ return 0;
+
+ head = resolve_refdup("HEAD", sha1, 1, NULL);
+ if (!head) {
+ warning(_("remote HEAD refers to nonexistent ref, "
+ "unable to checkout.\n"));
+ return 0;
+ }
+ if (!strcmp(head, "HEAD")) {
+ if (advice_detached_head)
+ detach_advice(sha1_to_hex(sha1));
+ } else {
+ if (prefixcmp(head, "refs/heads/"))
+ die(_("HEAD not found below refs/heads!"));
+ }
+ free(head);
+
+ /* We need to be in the new work tree for the checkout */
+ setup_work_tree();
+
+ lock_file = xcalloc(1, sizeof(struct lock_file));
+ fd = hold_locked_index(lock_file, 1);
+
+ memset(&opts, 0, sizeof opts);
+ opts.update = 1;
+ opts.merge = 1;
+ opts.fn = oneway_merge;
+ opts.verbose_update = (option_verbosity > 0);
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
+
+ tree = parse_tree_indirect(sha1);
+ parse_tree(tree);
+ init_tree_desc(&t, tree->buffer, tree->size);
+ unpack_trees(1, &t, &opts);
+
+ if (write_cache(fd, active_cache, active_nr) ||
+ commit_locked_index(lock_file))
+ die(_("unable to write new index file"));
+
+ err |= run_hook(NULL, "post-checkout", sha1_to_hex(null_sha1),
+ sha1_to_hex(sha1), "1", NULL);
+
+ if (!err && option_recursive)
+ err = run_command_v_opt(argv_submodule, RUN_GIT_CMD);
+
+ return err;
}
static int write_one_config(const char *key, const char *value, void *data)
@@ -475,11 +630,13 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
const struct ref *remote_head_points_at;
const struct ref *our_head_points_at;
struct ref *mapped_refs;
+ const struct ref *ref;
struct strbuf key = STRBUF_INIT, value = STRBUF_INIT;
struct strbuf branch_top = STRBUF_INIT, reflog_msg = STRBUF_INIT;
struct transport *transport = NULL;
- char *src_ref_prefix = "refs/heads/";
- int err = 0;
+ const char *src_ref_prefix = "refs/heads/";
+ struct remote *remote;
+ int err = 0, complete_refs_before_fetch = 1;
struct refspec *refspec;
const char *fetch_pattern;
@@ -498,6 +655,9 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
usage_msg_opt(_("You must specify a repository to clone."),
builtin_clone_usage, builtin_clone_options);
+ if (option_single_branch == -1)
+ option_single_branch = option_depth ? 1 : 0;
+
if (option_mirror)
option_bare = 1;
@@ -630,13 +790,10 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
strbuf_reset(&value);
- if (is_local) {
- refs = clone_local(path, git_dir);
- mapped_refs = wanted_peer_refs(refs, refspec);
- } else {
- struct remote *remote = remote_get(option_origin);
- transport = transport_get(remote, remote->url[0]);
+ remote = remote_get(option_origin);
+ transport = transport_get(remote, remote->url[0]);
+ if (!is_local) {
if (!transport->get_refs_list || !transport->fetch)
die(_("Don't know how to clone %s"), transport->url);
@@ -645,43 +802,50 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
if (option_depth)
transport_set_option(transport, TRANS_OPT_DEPTH,
option_depth);
+ if (option_single_branch)
+ transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, "1");
transport_set_verbosity(transport, option_verbosity, option_progress);
if (option_upload_pack)
transport_set_option(transport, TRANS_OPT_UPLOADPACK,
option_upload_pack);
-
- refs = transport_get_remote_refs(transport);
- if (refs) {
- mapped_refs = wanted_peer_refs(refs, refspec);
- transport_fetch_refs(transport, mapped_refs);
- }
}
- if (refs) {
- clear_extra_refs();
+ refs = transport_get_remote_refs(transport);
+ mapped_refs = refs ? wanted_peer_refs(refs, refspec) : NULL;
- write_remote_refs(mapped_refs);
+ /*
+ * transport_get_remote_refs() may return refs with null sha-1
+ * in mapped_refs (see struct transport->get_refs_list
+ * comment). In that case we need to fetch it early because
+ * the remote_head code below relies on it.
+ *
+ * For normal clones, transport_get_remote_refs() should
+ * return a reliable ref set, so we can delay cloning until after
+ * remote HEAD check.
+ */
+ for (ref = refs; ref; ref = ref->next)
+ if (is_null_sha1(ref->old_sha1)) {
+ complete_refs_before_fetch = 0;
+ break;
+ }
+ if (!is_local && !complete_refs_before_fetch && refs)
+ transport_fetch_refs(transport, mapped_refs);
+
+ if (refs) {
remote_head = find_ref_by_name(refs, "HEAD");
remote_head_points_at =
guess_remote_head(remote_head, mapped_refs, 0);
if (option_branch) {
- struct strbuf head = STRBUF_INIT;
- strbuf_addstr(&head, src_ref_prefix);
- strbuf_addstr(&head, option_branch);
our_head_points_at =
- find_ref_by_name(mapped_refs, head.buf);
- strbuf_release(&head);
-
- if (!our_head_points_at) {
- warning(_("Remote branch %s not found in "
- "upstream %s, using HEAD instead"),
- option_branch, option_origin);
- our_head_points_at = remote_head_points_at;
- }
+ find_remote_branch(mapped_refs, option_branch);
+
+ if (!our_head_points_at)
+ die(_("Remote branch %s not found in upstream %s"),
+ option_branch, option_origin);
}
else
our_head_points_at = remote_head_points_at;
@@ -697,84 +861,20 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
"refs/heads/master");
}
- if (remote_head_points_at && !option_bare) {
- struct strbuf head_ref = STRBUF_INIT;
- strbuf_addstr(&head_ref, branch_top.buf);
- strbuf_addstr(&head_ref, "HEAD");
- create_symref(head_ref.buf,
- remote_head_points_at->peer_ref->name,
- reflog_msg.buf);
- }
+ if (is_local)
+ clone_local(path, git_dir);
+ else if (refs && complete_refs_before_fetch)
+ transport_fetch_refs(transport, mapped_refs);
- if (our_head_points_at) {
- /* Local default branch link */
- create_symref("HEAD", our_head_points_at->name, NULL);
- if (!option_bare) {
- const char *head = skip_prefix(our_head_points_at->name,
- "refs/heads/");
- update_ref(reflog_msg.buf, "HEAD",
- our_head_points_at->old_sha1,
- NULL, 0, DIE_ON_ERR);
- install_branch_config(0, head, option_origin,
- our_head_points_at->name);
- }
- } else if (remote_head) {
- /* Source had detached HEAD pointing somewhere. */
- if (!option_bare) {
- update_ref(reflog_msg.buf, "HEAD",
- remote_head->old_sha1,
- NULL, REF_NODEREF, DIE_ON_ERR);
- our_head_points_at = remote_head;
- }
- } else {
- /* Nothing to checkout out */
- if (!option_no_checkout)
- warning(_("remote HEAD refers to nonexistent ref, "
- "unable to checkout.\n"));
- option_no_checkout = 1;
- }
+ update_remote_refs(refs, mapped_refs, remote_head_points_at,
+ branch_top.buf, reflog_msg.buf);
- if (transport) {
- transport_unlock_pack(transport);
- transport_disconnect(transport);
- }
+ update_head(our_head_points_at, remote_head, reflog_msg.buf);
- if (!option_no_checkout) {
- struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
- struct unpack_trees_options opts;
- struct tree *tree;
- struct tree_desc t;
- int fd;
-
- /* We need to be in the new work tree for the checkout */
- setup_work_tree();
-
- fd = hold_locked_index(lock_file, 1);
-
- memset(&opts, 0, sizeof opts);
- opts.update = 1;
- opts.merge = 1;
- opts.fn = oneway_merge;
- opts.verbose_update = (option_verbosity > 0);
- opts.src_index = &the_index;
- opts.dst_index = &the_index;
-
- tree = parse_tree_indirect(our_head_points_at->old_sha1);
- parse_tree(tree);
- init_tree_desc(&t, tree->buffer, tree->size);
- unpack_trees(1, &t, &opts);
-
- if (write_cache(fd, active_cache, active_nr) ||
- commit_locked_index(lock_file))
- die(_("unable to write new index file"));
-
- err |= run_hook(NULL, "post-checkout", sha1_to_hex(null_sha1),
- sha1_to_hex(our_head_points_at->old_sha1), "1",
- NULL);
-
- if (!err && option_recursive)
- err = run_command_v_opt(argv_submodule, RUN_GIT_CMD);
- }
+ transport_unlock_pack(transport);
+ transport_disconnect(transport);
+
+ err = checkout();
strbuf_release(&reflog_msg);
strbuf_release(&branch_top);
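
A quick sketch of the detached-HEAD behaviour handled by the new update_head() above when `--branch` names a tag (the URL and tag name are hypothetical):

------------
$ git clone --branch v1.0 git://example.com/frotz.git frotz-v1.0
$ cd frotz-v1.0
$ git symbolic-ref HEAD
fatal: ref HEAD is not a symbolic ref
------------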
diff --git a/builtin/commit.c b/builtin/commit.c
index bf42bb384d..3714582e19 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -196,16 +196,16 @@ static void determine_whence(struct wt_status *s)
static const char *whence_s(void)
{
- char *s = "";
+ const char *s = "";
switch (whence) {
case FROM_COMMIT:
break;
case FROM_MERGE:
- s = "merge";
+ s = _("merge");
break;
case FROM_CHERRY_PICK:
- s = "cherry-pick";
+ s = _("cherry-pick");
break;
}
@@ -543,6 +543,7 @@ static void determine_author_info(struct strbuf *author_ident)
if (author_message) {
const char *a, *lb, *rb, *eol;
+ size_t len;
a = strstr(author_message_buffer, "\nauthor ");
if (!a)
@@ -563,6 +564,11 @@ static void determine_author_info(struct strbuf *author_ident)
(a + strlen("\nauthor "))));
email = xmemdupz(lb + strlen("<"), rb - (lb + strlen("<")));
date = xmemdupz(rb + strlen("> "), eol - (rb + strlen("> ")));
+ len = eol - (rb + strlen("> "));
+ date = xmalloc(len + 2);
+ *date = '@';
+ memcpy(date + 1, rb + strlen("> "), len);
+ date[len + 1] = '\0';
}
if (force_author) {
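
The '@' prefix added above marks the author date copied from an existing commit as a raw "@<epoch> <offset>" value, which the parse-date changes elsewhere in this series understand, so amending commits with timestamps near 1970 keeps the date intact. A hypothetical demonstration, assuming the matching date.c support from this series:

------------
$ GIT_AUTHOR_DATE='@0 +0000' git commit --allow-empty -m 'epoch commit'
$ git commit --amend -C HEAD
$ git log -1 --format=%ad
Thu Jan 1 00:00:00 1970 +0000
------------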
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 0481c169ca..ab186332fa 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -585,7 +585,7 @@ static void find_non_local_tags(struct transport *transport,
for_each_ref(add_existing, &existing_refs);
for (ref = transport_get_remote_refs(transport); ref; ref = ref->next) {
- if (prefixcmp(ref->name, "refs/tags"))
+ if (prefixcmp(ref->name, "refs/tags/"))
continue;
/*
diff --git a/builtin/grep.c b/builtin/grep.c
index 9ce064ac11..5c2ae94e55 100644
--- a/builtin/grep.c
+++ b/builtin/grep.c
@@ -1034,8 +1034,9 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
#ifndef NO_PTHREADS
if (use_threads) {
- if (opt.pre_context || opt.post_context || opt.file_break ||
- opt.funcbody)
+ if (!(opt.name_only || opt.unmatch_name_only || opt.count)
+ && (opt.pre_context || opt.post_context ||
+ opt.file_break || opt.funcbody))
skip_first_line = 1;
start_threads(&opt);
}
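
With the tweak above, output modes that only print names or counts (`-l`, `-L`, `-c`) no longer have their first line of output skipped when grep runs threaded, so results such as the following stay complete (file names and counts are made up):

------------
$ git grep -c -W frotz -- '*.c'
frotz.c:3
nitfol.c:1
------------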
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index af7dc37a44..dd1c5c961d 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -34,6 +34,8 @@ struct base_data {
struct object_entry *obj;
void *data;
unsigned long size;
+ int ref_first, ref_last;
+ int ofs_first, ofs_last;
};
/*
@@ -221,6 +223,15 @@ static NORETURN void bad_object(unsigned long offset, const char *format, ...)
die("pack has bad object at offset %lu: %s", offset, buf);
}
+static struct base_data *alloc_base_data(void)
+{
+ struct base_data *base = xmalloc(sizeof(struct base_data));
+ memset(base, 0, sizeof(*base));
+ base->ref_last = -1;
+ base->ofs_last = -1;
+ return base;
+}
+
static void free_base_data(struct base_data *c)
{
if (c->data) {
@@ -504,14 +515,52 @@ static int is_delta_type(enum object_type type)
return (type == OBJ_REF_DELTA || type == OBJ_OFS_DELTA);
}
+/*
+ * This function is part of find_unresolved_deltas(). There are two
+ * walkers going in opposite directions.
+ *
+ * The first one in find_unresolved_deltas() traverses down from
+ * parent node to children, deflating nodes along the way. However,
+ * memory for deflated nodes is limited by delta_base_cache_limit, so
+ * at some point parent node's deflated content may be freed.
+ *
+ * The second walker is this function, which goes from the current node
+ * up to the top parent if necessary to deflate the node. In the normal
+ * situation, its parent node is already deflated, so it just needs to
+ * apply the delta.
+ *
+ * In the worst-case scenario, the parent node is no longer deflated
+ * because we are running out of delta_base_cache_limit; we need to
+ * re-deflate the parents, possibly up to the top base.
+ *
+ * All deflated objects here are subject to being freed if we exceed
+ * delta_base_cache_limit; just as in find_unresolved_deltas(), we
+ * just need to make sure the last node is not freed.
+ */
static void *get_base_data(struct base_data *c)
{
if (!c->data) {
struct object_entry *obj = c->obj;
+ struct base_data **delta = NULL;
+ int delta_nr = 0, delta_alloc = 0;
- if (is_delta_type(obj->type)) {
- void *base = get_base_data(c->base);
- void *raw = get_data_from_pack(obj);
+ while (is_delta_type(c->obj->type) && !c->data) {
+ ALLOC_GROW(delta, delta_nr + 1, delta_alloc);
+ delta[delta_nr++] = c;
+ c = c->base;
+ }
+ if (!delta_nr) {
+ c->data = get_data_from_pack(obj);
+ c->size = obj->size;
+ base_cache_used += c->size;
+ prune_base_data(c);
+ }
+ for (; delta_nr > 0; delta_nr--) {
+ void *base, *raw;
+ c = delta[delta_nr - 1];
+ obj = c->obj;
+ base = get_base_data(c->base);
+ raw = get_data_from_pack(obj);
c->data = patch_delta(
base, c->base->size,
raw, obj->size,
@@ -519,13 +568,10 @@ static void *get_base_data(struct base_data *c)
free(raw);
if (!c->data)
bad_object(obj->idx.offset, "failed to apply delta");
- } else {
- c->data = get_data_from_pack(obj);
- c->size = obj->size;
+ base_cache_used += c->size;
+ prune_base_data(c);
}
-
- base_cache_used += c->size;
- prune_base_data(c);
+ free(delta);
}
return c->data;
}
@@ -553,58 +599,76 @@ static void resolve_delta(struct object_entry *delta_obj,
nr_resolved_deltas++;
}
-static void find_unresolved_deltas(struct base_data *base,
- struct base_data *prev_base)
+static struct base_data *find_unresolved_deltas_1(struct base_data *base,
+ struct base_data *prev_base)
{
- int i, ref_first, ref_last, ofs_first, ofs_last;
-
- /*
- * This is a recursive function. Those brackets should help reducing
- * stack usage by limiting the scope of the delta_base union.
- */
- {
+ if (base->ref_last == -1 && base->ofs_last == -1) {
union delta_base base_spec;
hashcpy(base_spec.sha1, base->obj->idx.sha1);
find_delta_children(&base_spec,
- &ref_first, &ref_last, OBJ_REF_DELTA);
+ &base->ref_first, &base->ref_last, OBJ_REF_DELTA);
memset(&base_spec, 0, sizeof(base_spec));
base_spec.offset = base->obj->idx.offset;
find_delta_children(&base_spec,
- &ofs_first, &ofs_last, OBJ_OFS_DELTA);
- }
+ &base->ofs_first, &base->ofs_last, OBJ_OFS_DELTA);
- if (ref_last == -1 && ofs_last == -1) {
- free(base->data);
- return;
- }
+ if (base->ref_last == -1 && base->ofs_last == -1) {
+ free(base->data);
+ return NULL;
+ }
- link_base_data(prev_base, base);
+ link_base_data(prev_base, base);
+ }
- for (i = ref_first; i <= ref_last; i++) {
- struct object_entry *child = objects + deltas[i].obj_no;
- struct base_data result;
+ if (base->ref_first <= base->ref_last) {
+ struct object_entry *child = objects + deltas[base->ref_first].obj_no;
+ struct base_data *result = alloc_base_data();
assert(child->real_type == OBJ_REF_DELTA);
- resolve_delta(child, base, &result);
- if (i == ref_last && ofs_last == -1)
+ resolve_delta(child, base, result);
+ if (base->ref_first == base->ref_last && base->ofs_last == -1)
free_base_data(base);
- find_unresolved_deltas(&result, base);
+
+ base->ref_first++;
+ return result;
}
- for (i = ofs_first; i <= ofs_last; i++) {
- struct object_entry *child = objects + deltas[i].obj_no;
- struct base_data result;
+ if (base->ofs_first <= base->ofs_last) {
+ struct object_entry *child = objects + deltas[base->ofs_first].obj_no;
+ struct base_data *result = alloc_base_data();
assert(child->real_type == OBJ_OFS_DELTA);
- resolve_delta(child, base, &result);
- if (i == ofs_last)
+ resolve_delta(child, base, result);
+ if (base->ofs_first == base->ofs_last)
free_base_data(base);
- find_unresolved_deltas(&result, base);
+
+ base->ofs_first++;
+ return result;
}
unlink_base_data(base);
+ return NULL;
+}
+
+static void find_unresolved_deltas(struct base_data *base)
+{
+ struct base_data *new_base, *prev_base = NULL;
+ for (;;) {
+ new_base = find_unresolved_deltas_1(base, prev_base);
+
+ if (new_base) {
+ prev_base = base;
+ base = new_base;
+ } else {
+ free(base);
+ base = prev_base;
+ if (!base)
+ return;
+ prev_base = base->base;
+ }
+ }
}
static int compare_delta_entry(const void *a, const void *b)
@@ -684,13 +748,13 @@ static void parse_pack_objects(unsigned char *sha1)
progress = start_progress("Resolving deltas", nr_deltas);
for (i = 0; i < nr_objects; i++) {
struct object_entry *obj = &objects[i];
- struct base_data base_obj;
+ struct base_data *base_obj = alloc_base_data();
if (is_delta_type(obj->type))
continue;
- base_obj.obj = obj;
- base_obj.data = NULL;
- find_unresolved_deltas(&base_obj, NULL);
+ base_obj->obj = obj;
+ base_obj->data = NULL;
+ find_unresolved_deltas(base_obj);
display_progress(progress, nr_resolved_deltas);
}
}
@@ -783,20 +847,20 @@ static void fix_unresolved_deltas(struct sha1file *f, int nr_unresolved)
for (i = 0; i < n; i++) {
struct delta_entry *d = sorted_by_pos[i];
enum object_type type;
- struct base_data base_obj;
+ struct base_data *base_obj = alloc_base_data();
if (objects[d->obj_no].real_type != OBJ_REF_DELTA)
continue;
- base_obj.data = read_sha1_file(d->base.sha1, &type, &base_obj.size);
- if (!base_obj.data)
+ base_obj->data = read_sha1_file(d->base.sha1, &type, &base_obj->size);
+ if (!base_obj->data)
continue;
- if (check_sha1_signature(d->base.sha1, base_obj.data,
- base_obj.size, typename(type)))
+ if (check_sha1_signature(d->base.sha1, base_obj->data,
+ base_obj->size, typename(type)))
die("local object %s is corrupt", sha1_to_hex(d->base.sha1));
- base_obj.obj = append_obj_to_pack(f, d->base.sha1,
- base_obj.data, base_obj.size, type);
- find_unresolved_deltas(&base_obj, NULL);
+ base_obj->obj = append_obj_to_pack(f, d->base.sha1,
+ base_obj->data, base_obj->size, type);
+ find_unresolved_deltas(base_obj);
display_progress(progress, nr_resolved_deltas);
}
free(sorted_by_pos);
diff --git a/builtin/mailinfo.c b/builtin/mailinfo.c
index bfb32b7233..eaf9e157a3 100644
--- a/builtin/mailinfo.c
+++ b/builtin/mailinfo.c
@@ -250,8 +250,17 @@ static void cleanup_subject(struct strbuf *subject)
(7 <= remove &&
memmem(subject->buf + at, remove, "PATCH", 5)))
strbuf_remove(subject, at, remove);
- else
+ else {
at += remove;
+ /*
+ * If the input had a space after the ], keep
+ * it. We don't bother with finding the end of
+ * the space, since we later normalize it
+ * anyway.
+ */
+ if (isspace(subject->buf[at]))
+ at += 1;
+ }
continue;
}
break;
diff --git a/builtin/merge.c b/builtin/merge.c
index 3a451727d0..ed0f959ac4 100644
--- a/builtin/merge.c
+++ b/builtin/merge.c
@@ -48,7 +48,7 @@ static const char * const builtin_merge_usage[] = {
static int show_diffstat = 1, shortlog_len = -1, squash;
static int option_commit = 1, allow_fast_forward = 1;
-static int fast_forward_only, option_edit;
+static int fast_forward_only, option_edit = -1;
static int allow_trivial = 1, have_message;
static int overwrite_ignore = 1;
static struct strbuf merge_msg = STRBUF_INIT;
@@ -193,7 +193,7 @@ static struct option builtin_merge_options[] = {
"create a single commit instead of doing a merge"),
OPT_BOOLEAN(0, "commit", &option_commit,
"perform a commit if the merge succeeds (default)"),
- OPT_BOOLEAN('e', "edit", &option_edit,
+ OPT_BOOL('e', "edit", &option_edit,
"edit message before committing"),
OPT_BOOLEAN(0, "ff", &allow_fast_forward,
"allow fast-forward (default)"),
@@ -885,20 +885,30 @@ static void abort_commit(const char *err_msg)
exit(1);
}
+static const char merge_editor_comment[] =
+N_("Please enter a commit message to explain why this merge is necessary,\n"
+ "especially if it merges an updated upstream into a topic branch.\n"
+ "\n"
+ "Lines starting with '#' will be ignored, and an empty message aborts\n"
+ "the commit.\n");
+
static void prepare_to_commit(void)
{
struct strbuf msg = STRBUF_INIT;
+ const char *comment = _(merge_editor_comment);
strbuf_addbuf(&msg, &merge_msg);
strbuf_addch(&msg, '\n');
+ if (0 < option_edit)
+ strbuf_add_lines(&msg, "# ", comment, strlen(comment));
write_merge_msg(&msg);
run_hook(get_index_file(), "prepare-commit-msg",
git_path("MERGE_MSG"), "merge", NULL, NULL);
- if (option_edit) {
+ if (0 < option_edit) {
if (launch_editor(git_path("MERGE_MSG"), NULL, NULL))
abort_commit(NULL);
}
read_merge_msg(&msg);
- stripspace(&msg, option_edit);
+ stripspace(&msg, 0 < option_edit);
if (!msg.len)
abort_commit(_("Empty commit message."));
strbuf_release(&merge_msg);
@@ -1099,6 +1109,33 @@ static void write_merge_state(void)
close(fd);
}
+static int default_edit_option(void)
+{
+ static const char name[] = "GIT_MERGE_AUTOEDIT";
+ const char *e = getenv(name);
+ struct stat st_stdin, st_stdout;
+
+ if (have_message)
+ /* an explicit -m msg without --[no-]edit */
+ return 0;
+
+ if (e) {
+ int v = git_config_maybe_bool(name, e);
+ if (v < 0)
+ die("Bad value '%s' in environment '%s'", e, name);
+ return v;
+ }
+
+ /* Use the editor if stdin and stdout are the same file and stdin is a tty */
+ return (!fstat(0, &st_stdin) &&
+ !fstat(1, &st_stdout) &&
+ isatty(0) &&
+ st_stdin.st_dev == st_stdout.st_dev &&
+ st_stdin.st_ino == st_stdout.st_ino &&
+ st_stdin.st_mode == st_stdout.st_mode);
+}
+
+
int cmd_merge(int argc, const char **argv, const char *prefix)
{
unsigned char result_tree[20];
@@ -1283,14 +1320,19 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
sha1_to_hex(commit->object.sha1));
setenv(buf.buf, argv[i], 1);
strbuf_reset(&buf);
- if (merge_remote_util(commit) &&
+ if (!fast_forward_only &&
+ merge_remote_util(commit) &&
merge_remote_util(commit)->obj &&
merge_remote_util(commit)->obj->type == OBJ_TAG) {
- option_edit = 1;
+ if (option_edit < 0)
+ option_edit = 1;
allow_fast_forward = 0;
}
}
+ if (option_edit < 0)
+ option_edit = default_edit_option();
+
if (!use_strategies) {
if (!remoteheads->next)
add_strategies(pull_twohead, DEFAULT_TWOHEAD);
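
Connecting the change above to the release note: a branch that only follows upstream can again fast-forward to a signed tag, without being forced into a real merge just to record the mergetag (remote and tag names are hypothetical):

------------
$ git fetch origin
$ git merge --ff-only v2.0      # fast-forwards; no merge commit, no mergetag recorded
------------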
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index 8c9e91e78c..fa7448be5a 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -33,6 +33,7 @@ static int transfer_unpack_limit = -1;
static int unpack_limit = 100;
static int report_status;
static int use_sideband;
+static int quiet;
static int prefer_ofs_delta = 1;
static int auto_update_server_info;
static int auto_gc = 1;
@@ -122,7 +123,7 @@ static void show_ref(const char *path, const unsigned char *sha1)
else
packet_write(1, "%s %s%c%s%s\n",
sha1_to_hex(sha1), path, 0,
- " report-status delete-refs side-band-64k",
+ " report-status delete-refs side-band-64k quiet",
prefer_ofs_delta ? " ofs-delta" : "");
sent_capabilities = 1;
}
@@ -748,10 +749,13 @@ static struct command *read_head_info(void)
refname = line + 82;
reflen = strlen(refname);
if (reflen + 82 < len) {
- if (strstr(refname + reflen + 1, "report-status"))
+ const char *feature_list = refname + reflen + 1;
+ if (parse_feature_request(feature_list, "report-status"))
report_status = 1;
- if (strstr(refname + reflen + 1, "side-band-64k"))
+ if (parse_feature_request(feature_list, "side-band-64k"))
use_sideband = LARGE_PACKET_MAX;
+ if (parse_feature_request(feature_list, "quiet"))
+ quiet = 1;
}
cmd = xcalloc(1, sizeof(struct command) + len - 80);
hashcpy(cmd->old_sha1, old_sha1);
@@ -805,8 +809,10 @@ static const char *unpack(void)
if (ntohl(hdr.hdr_entries) < unpack_limit) {
int code, i = 0;
- const char *unpacker[4];
+ const char *unpacker[5];
unpacker[i++] = "unpack-objects";
+ if (quiet)
+ unpacker[i++] = "-q";
if (fsck_objects)
unpacker[i++] = "--strict";
unpacker[i++] = hdr_arg;
@@ -901,6 +907,11 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix)
const char *arg = *argv++;
if (*arg == '-') {
+ if (!strcmp(arg, "--quiet")) {
+ quiet = 1;
+ continue;
+ }
+
if (!strcmp(arg, "--advertise-refs")) {
advertise_refs = 1;
continue;
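
The "quiet" capability advertised above lets the client ask the receiving end to stay silent as well, so a quiet push now really is quiet on both sides (the remote name is hypothetical):

------------
$ git push --quiet origin master
------------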
diff --git a/builtin/remote.c b/builtin/remote.c
index 583eec90e0..f54a89adc7 100644
--- a/builtin/remote.c
+++ b/builtin/remote.c
@@ -534,7 +534,7 @@ static int add_branch_for_removal(const char *refname,
}
/* don't delete non-remote-tracking refs */
- if (prefixcmp(refname, "refs/remotes")) {
+ if (prefixcmp(refname, "refs/remotes/")) {
/* advise user how to delete local branches */
if (!prefixcmp(refname, "refs/heads/"))
string_list_append(branches->skipped,
diff --git a/builtin/revert.c b/builtin/revert.c
index 0d8020cf64..e6840f23dc 100644
--- a/builtin/revert.c
+++ b/builtin/revert.c
@@ -1,18 +1,9 @@
#include "cache.h"
#include "builtin.h"
-#include "object.h"
-#include "commit.h"
-#include "tag.h"
-#include "run-command.h"
-#include "exec_cmd.h"
-#include "utf8.h"
#include "parse-options.h"
-#include "cache-tree.h"
#include "diff.h"
#include "revision.h"
#include "rerere.h"
-#include "merge-recursive.h"
-#include "refs.h"
#include "dir.h"
#include "sequencer.h"
@@ -39,49 +30,14 @@ static const char * const cherry_pick_usage[] = {
NULL
};
-enum replay_action { REVERT, CHERRY_PICK };
-enum replay_subcommand {
- REPLAY_NONE,
- REPLAY_REMOVE_STATE,
- REPLAY_CONTINUE,
- REPLAY_ROLLBACK
-};
-
-struct replay_opts {
- enum replay_action action;
- enum replay_subcommand subcommand;
-
- /* Boolean options */
- int edit;
- int record_origin;
- int no_commit;
- int signoff;
- int allow_ff;
- int allow_rerere_auto;
-
- int mainline;
-
- /* Merge strategy */
- const char *strategy;
- const char **xopts;
- size_t xopts_nr, xopts_alloc;
-
- /* Only used by REPLAY_NONE */
- struct rev_info *revs;
-};
-
-#define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
-
static const char *action_name(const struct replay_opts *opts)
{
- return opts->action == REVERT ? "revert" : "cherry-pick";
+ return opts->action == REPLAY_REVERT ? "revert" : "cherry-pick";
}
-static char *get_encoding(const char *message);
-
static const char * const *revert_or_cherry_pick_usage(struct replay_opts *opts)
{
- return opts->action == REVERT ? revert_usage : cherry_pick_usage;
+ return opts->action == REPLAY_REVERT ? revert_usage : cherry_pick_usage;
}
static int option_parse_x(const struct option *opt,
@@ -160,7 +116,7 @@ static void parse_args(int argc, const char **argv, struct replay_opts *opts)
OPT_END(),
};
- if (opts->action == CHERRY_PICK) {
+ if (opts->action == REPLAY_PICK) {
struct option cp_extra[] = {
OPT_BOOLEAN('x', NULL, &opts->record_origin, "append commit name"),
OPT_BOOLEAN(0, "ff", &opts->allow_ff, "allow fast-forward"),
@@ -237,902 +193,6 @@ static void parse_args(int argc, const char **argv, struct replay_opts *opts)
usage_with_options(usage_str, options);
}
-struct commit_message {
- char *parent_label;
- const char *label;
- const char *subject;
- char *reencoded_message;
- const char *message;
-};
-
-static int get_message(struct commit *commit, struct commit_message *out)
-{
- const char *encoding;
- const char *abbrev, *subject;
- int abbrev_len, subject_len;
- char *q;
-
- if (!commit->buffer)
- return -1;
- encoding = get_encoding(commit->buffer);
- if (!encoding)
- encoding = "UTF-8";
- if (!git_commit_encoding)
- git_commit_encoding = "UTF-8";
-
- out->reencoded_message = NULL;
- out->message = commit->buffer;
- if (strcmp(encoding, git_commit_encoding))
- out->reencoded_message = reencode_string(commit->buffer,
- git_commit_encoding, encoding);
- if (out->reencoded_message)
- out->message = out->reencoded_message;
-
- abbrev = find_unique_abbrev(commit->object.sha1, DEFAULT_ABBREV);
- abbrev_len = strlen(abbrev);
-
- subject_len = find_commit_subject(out->message, &subject);
-
- out->parent_label = xmalloc(strlen("parent of ") + abbrev_len +
- strlen("... ") + subject_len + 1);
- q = out->parent_label;
- q = mempcpy(q, "parent of ", strlen("parent of "));
- out->label = q;
- q = mempcpy(q, abbrev, abbrev_len);
- q = mempcpy(q, "... ", strlen("... "));
- out->subject = q;
- q = mempcpy(q, subject, subject_len);
- *q = '\0';
- return 0;
-}
-
-static void free_message(struct commit_message *msg)
-{
- free(msg->parent_label);
- free(msg->reencoded_message);
-}
-
-static char *get_encoding(const char *message)
-{
- const char *p = message, *eol;
-
- while (*p && *p != '\n') {
- for (eol = p + 1; *eol && *eol != '\n'; eol++)
- ; /* do nothing */
- if (!prefixcmp(p, "encoding ")) {
- char *result = xmalloc(eol - 8 - p);
- strlcpy(result, p + 9, eol - 8 - p);
- return result;
- }
- p = eol;
- if (*p == '\n')
- p++;
- }
- return NULL;
-}
-
-static void write_cherry_pick_head(struct commit *commit, const char *pseudoref)
-{
- const char *filename;
- int fd;
- struct strbuf buf = STRBUF_INIT;
-
- strbuf_addf(&buf, "%s\n", sha1_to_hex(commit->object.sha1));
-
- filename = git_path("%s", pseudoref);
- fd = open(filename, O_WRONLY | O_CREAT, 0666);
- if (fd < 0)
- die_errno(_("Could not open '%s' for writing"), filename);
- if (write_in_full(fd, buf.buf, buf.len) != buf.len || close(fd))
- die_errno(_("Could not write to '%s'"), filename);
- strbuf_release(&buf);
-}
-
-static void print_advice(int show_hint)
-{
- char *msg = getenv("GIT_CHERRY_PICK_HELP");
-
- if (msg) {
- fprintf(stderr, "%s\n", msg);
- /*
- * A conflict has occured but the porcelain
- * (typically rebase --interactive) wants to take care
- * of the commit itself so remove CHERRY_PICK_HEAD
- */
- unlink(git_path("CHERRY_PICK_HEAD"));
- return;
- }
-
- if (show_hint) {
- advise("after resolving the conflicts, mark the corrected paths");
- advise("with 'git add <paths>' or 'git rm <paths>'");
- advise("and commit the result with 'git commit'");
- }
-}
-
-static void write_message(struct strbuf *msgbuf, const char *filename)
-{
- static struct lock_file msg_file;
-
- int msg_fd = hold_lock_file_for_update(&msg_file, filename,
- LOCK_DIE_ON_ERROR);
- if (write_in_full(msg_fd, msgbuf->buf, msgbuf->len) < 0)
- die_errno(_("Could not write to %s"), filename);
- strbuf_release(msgbuf);
- if (commit_lock_file(&msg_file) < 0)
- die(_("Error wrapping up %s"), filename);
-}
-
-static struct tree *empty_tree(void)
-{
- return lookup_tree((const unsigned char *)EMPTY_TREE_SHA1_BIN);
-}
-
-static int error_dirty_index(struct replay_opts *opts)
-{
- if (read_cache_unmerged())
- return error_resolve_conflict(action_name(opts));
-
- /* Different translation strings for cherry-pick and revert */
- if (opts->action == CHERRY_PICK)
- error(_("Your local changes would be overwritten by cherry-pick."));
- else
- error(_("Your local changes would be overwritten by revert."));
-
- if (advice_commit_before_merge)
- advise(_("Commit your changes or stash them to proceed."));
- return -1;
-}
-
-static int fast_forward_to(const unsigned char *to, const unsigned char *from)
-{
- struct ref_lock *ref_lock;
-
- read_cache();
- if (checkout_fast_forward(from, to))
- exit(1); /* the callee should have complained already */
- ref_lock = lock_any_ref_for_update("HEAD", from, 0);
- return write_ref_sha1(ref_lock, to, "cherry-pick");
-}
-
-static int do_recursive_merge(struct commit *base, struct commit *next,
- const char *base_label, const char *next_label,
- unsigned char *head, struct strbuf *msgbuf,
- struct replay_opts *opts)
-{
- struct merge_options o;
- struct tree *result, *next_tree, *base_tree, *head_tree;
- int clean, index_fd;
- const char **xopt;
- static struct lock_file index_lock;
-
- index_fd = hold_locked_index(&index_lock, 1);
-
- read_cache();
-
- init_merge_options(&o);
- o.ancestor = base ? base_label : "(empty tree)";
- o.branch1 = "HEAD";
- o.branch2 = next ? next_label : "(empty tree)";
-
- head_tree = parse_tree_indirect(head);
- next_tree = next ? next->tree : empty_tree();
- base_tree = base ? base->tree : empty_tree();
-
- for (xopt = opts->xopts; xopt != opts->xopts + opts->xopts_nr; xopt++)
- parse_merge_opt(&o, *xopt);
-
- clean = merge_trees(&o,
- head_tree,
- next_tree, base_tree, &result);
-
- if (active_cache_changed &&
- (write_cache(index_fd, active_cache, active_nr) ||
- commit_locked_index(&index_lock)))
- /* TRANSLATORS: %s will be "revert" or "cherry-pick" */
- die(_("%s: Unable to write new index file"), action_name(opts));
- rollback_lock_file(&index_lock);
-
- if (!clean) {
- int i;
- strbuf_addstr(msgbuf, "\nConflicts:\n\n");
- for (i = 0; i < active_nr;) {
- struct cache_entry *ce = active_cache[i++];
- if (ce_stage(ce)) {
- strbuf_addch(msgbuf, '\t');
- strbuf_addstr(msgbuf, ce->name);
- strbuf_addch(msgbuf, '\n');
- while (i < active_nr && !strcmp(ce->name,
- active_cache[i]->name))
- i++;
- }
- }
- }
-
- return !clean;
-}
-
-/*
- * If we are cherry-pick, and if the merge did not result in
- * hand-editing, we will hit this commit and inherit the original
- * author date and name.
- * If we are revert, or if our cherry-pick results in a hand merge,
- * we had better say that the current user is responsible for that.
- */
-static int run_git_commit(const char *defmsg, struct replay_opts *opts)
-{
- /* 6 is max possible length of our args array including NULL */
- const char *args[6];
- int i = 0;
-
- args[i++] = "commit";
- args[i++] = "-n";
- if (opts->signoff)
- args[i++] = "-s";
- if (!opts->edit) {
- args[i++] = "-F";
- args[i++] = defmsg;
- }
- args[i] = NULL;
-
- return run_command_v_opt(args, RUN_GIT_CMD);
-}
-
-static int do_pick_commit(struct commit *commit, struct replay_opts *opts)
-{
- unsigned char head[20];
- struct commit *base, *next, *parent;
- const char *base_label, *next_label;
- struct commit_message msg = { NULL, NULL, NULL, NULL, NULL };
- char *defmsg = NULL;
- struct strbuf msgbuf = STRBUF_INIT;
- int res;
-
- if (opts->no_commit) {
- /*
- * We do not intend to commit immediately. We just want to
- * merge the differences in, so let's compute the tree
- * that represents the "current" state for merge-recursive
- * to work on.
- */
- if (write_cache_as_tree(head, 0, NULL))
- die (_("Your index file is unmerged."));
- } else {
- if (get_sha1("HEAD", head))
- return error(_("You do not have a valid HEAD"));
- if (index_differs_from("HEAD", 0))
- return error_dirty_index(opts);
- }
- discard_cache();
-
- if (!commit->parents) {
- parent = NULL;
- }
- else if (commit->parents->next) {
- /* Reverting or cherry-picking a merge commit */
- int cnt;
- struct commit_list *p;
-
- if (!opts->mainline)
- return error(_("Commit %s is a merge but no -m option was given."),
- sha1_to_hex(commit->object.sha1));
-
- for (cnt = 1, p = commit->parents;
- cnt != opts->mainline && p;
- cnt++)
- p = p->next;
- if (cnt != opts->mainline || !p)
- return error(_("Commit %s does not have parent %d"),
- sha1_to_hex(commit->object.sha1), opts->mainline);
- parent = p->item;
- } else if (0 < opts->mainline)
- return error(_("Mainline was specified but commit %s is not a merge."),
- sha1_to_hex(commit->object.sha1));
- else
- parent = commit->parents->item;
-
- if (opts->allow_ff && parent && !hashcmp(parent->object.sha1, head))
- return fast_forward_to(commit->object.sha1, head);
-
- if (parent && parse_commit(parent) < 0)
- /* TRANSLATORS: The first %s will be "revert" or
- "cherry-pick", the second %s a SHA1 */
- return error(_("%s: cannot parse parent commit %s"),
- action_name(opts), sha1_to_hex(parent->object.sha1));
-
- if (get_message(commit, &msg) != 0)
- return error(_("Cannot get commit message for %s"),
- sha1_to_hex(commit->object.sha1));
-
- /*
- * "commit" is an existing commit. We would want to apply
- * the difference it introduces since its first parent "prev"
- * on top of the current HEAD if we are cherry-pick. Or the
- * reverse of it if we are revert.
- */
-
- defmsg = git_pathdup("MERGE_MSG");
-
- if (opts->action == REVERT) {
- base = commit;
- base_label = msg.label;
- next = parent;
- next_label = msg.parent_label;
- strbuf_addstr(&msgbuf, "Revert \"");
- strbuf_addstr(&msgbuf, msg.subject);
- strbuf_addstr(&msgbuf, "\"\n\nThis reverts commit ");
- strbuf_addstr(&msgbuf, sha1_to_hex(commit->object.sha1));
-
- if (commit->parents && commit->parents->next) {
- strbuf_addstr(&msgbuf, ", reversing\nchanges made to ");
- strbuf_addstr(&msgbuf, sha1_to_hex(parent->object.sha1));
- }
- strbuf_addstr(&msgbuf, ".\n");
- } else {
- const char *p;
-
- base = parent;
- base_label = msg.parent_label;
- next = commit;
- next_label = msg.label;
-
- /*
- * Append the commit log message to msgbuf; it starts
- * after the tree, parent, author, committer
- * information followed by "\n\n".
- */
- p = strstr(msg.message, "\n\n");
- if (p) {
- p += 2;
- strbuf_addstr(&msgbuf, p);
- }
-
- if (opts->record_origin) {
- strbuf_addstr(&msgbuf, "(cherry picked from commit ");
- strbuf_addstr(&msgbuf, sha1_to_hex(commit->object.sha1));
- strbuf_addstr(&msgbuf, ")\n");
- }
- }
-
- if (!opts->strategy || !strcmp(opts->strategy, "recursive") || opts->action == REVERT) {
- res = do_recursive_merge(base, next, base_label, next_label,
- head, &msgbuf, opts);
- write_message(&msgbuf, defmsg);
- } else {
- struct commit_list *common = NULL;
- struct commit_list *remotes = NULL;
-
- write_message(&msgbuf, defmsg);
-
- commit_list_insert(base, &common);
- commit_list_insert(next, &remotes);
- res = try_merge_command(opts->strategy, opts->xopts_nr, opts->xopts,
- common, sha1_to_hex(head), remotes);
- free_commit_list(common);
- free_commit_list(remotes);
- }
-
- /*
- * If the merge was clean or if it failed due to conflict, we write
- * CHERRY_PICK_HEAD for the subsequent invocation of commit to use.
- * However, if the merge did not even start, then we don't want to
- * write it at all.
- */
- if (opts->action == CHERRY_PICK && !opts->no_commit && (res == 0 || res == 1))
- write_cherry_pick_head(commit, "CHERRY_PICK_HEAD");
- if (opts->action == REVERT && ((opts->no_commit && res == 0) || res == 1))
- write_cherry_pick_head(commit, "REVERT_HEAD");
-
- if (res) {
- error(opts->action == REVERT
- ? _("could not revert %s... %s")
- : _("could not apply %s... %s"),
- find_unique_abbrev(commit->object.sha1, DEFAULT_ABBREV),
- msg.subject);
- print_advice(res == 1);
- rerere(opts->allow_rerere_auto);
- } else {
- if (!opts->no_commit)
- res = run_git_commit(defmsg, opts);
- }
-
- free_message(&msg);
- free(defmsg);
-
- return res;
-}
-
-static void prepare_revs(struct replay_opts *opts)
-{
- if (opts->action != REVERT)
- opts->revs->reverse ^= 1;
-
- if (prepare_revision_walk(opts->revs))
- die(_("revision walk setup failed"));
-
- if (!opts->revs->commits)
- die(_("empty commit set passed"));
-}
-
-static void read_and_refresh_cache(struct replay_opts *opts)
-{
- static struct lock_file index_lock;
- int index_fd = hold_locked_index(&index_lock, 0);
- if (read_index_preload(&the_index, NULL) < 0)
- die(_("git %s: failed to read the index"), action_name(opts));
- refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL);
- if (the_index.cache_changed) {
- if (write_index(&the_index, index_fd) ||
- commit_locked_index(&index_lock))
- die(_("git %s: failed to refresh the index"), action_name(opts));
- }
- rollback_lock_file(&index_lock);
-}
-
-/*
- * Append a commit to the end of the commit_list.
- *
- * next starts by pointing to the variable that holds the head of an
- * empty commit_list, and is updated to point to the "next" field of
- * the last item on the list as new commits are appended.
- *
- * Usage example:
- *
- * struct commit_list *list;
- * struct commit_list **next = &list;
- *
- * next = commit_list_append(c1, next);
- * next = commit_list_append(c2, next);
- * assert(commit_list_count(list) == 2);
- * return list;
- */
-static struct commit_list **commit_list_append(struct commit *commit,
- struct commit_list **next)
-{
- struct commit_list *new = xmalloc(sizeof(struct commit_list));
- new->item = commit;
- *next = new;
- new->next = NULL;
- return &new->next;
-}
-
-static int format_todo(struct strbuf *buf, struct commit_list *todo_list,
- struct replay_opts *opts)
-{
- struct commit_list *cur = NULL;
- const char *sha1_abbrev = NULL;
- const char *action_str = opts->action == REVERT ? "revert" : "pick";
- const char *subject;
- int subject_len;
-
- for (cur = todo_list; cur; cur = cur->next) {
- sha1_abbrev = find_unique_abbrev(cur->item->object.sha1, DEFAULT_ABBREV);
- subject_len = find_commit_subject(cur->item->buffer, &subject);
- strbuf_addf(buf, "%s %s %.*s\n", action_str, sha1_abbrev,
- subject_len, subject);
- }
- return 0;
-}
-
-static struct commit *parse_insn_line(char *bol, char *eol, struct replay_opts *opts)
-{
- unsigned char commit_sha1[20];
- enum replay_action action;
- char *end_of_object_name;
- int saved, status, padding;
-
- if (!prefixcmp(bol, "pick")) {
- action = CHERRY_PICK;
- bol += strlen("pick");
- } else if (!prefixcmp(bol, "revert")) {
- action = REVERT;
- bol += strlen("revert");
- } else
- return NULL;
-
- /* Eat up extra spaces/ tabs before object name */
- padding = strspn(bol, " \t");
- if (!padding)
- return NULL;
- bol += padding;
-
- end_of_object_name = bol + strcspn(bol, " \t\n");
- saved = *end_of_object_name;
- *end_of_object_name = '\0';
- status = get_sha1(bol, commit_sha1);
- *end_of_object_name = saved;
-
- /*
- * Verify that the action matches up with the one in
- * opts; we don't support arbitrary instructions
- */
- if (action != opts->action) {
- const char *action_str;
- action_str = action == REVERT ? "revert" : "cherry-pick";
- error(_("Cannot %s during a %s"), action_str, action_name(opts));
- return NULL;
- }
-
- if (status < 0)
- return NULL;
-
- return lookup_commit_reference(commit_sha1);
-}
-
-static int parse_insn_buffer(char *buf, struct commit_list **todo_list,
- struct replay_opts *opts)
-{
- struct commit_list **next = todo_list;
- struct commit *commit;
- char *p = buf;
- int i;
-
- for (i = 1; *p; i++) {
- char *eol = strchrnul(p, '\n');
- commit = parse_insn_line(p, eol, opts);
- if (!commit)
- return error(_("Could not parse line %d."), i);
- next = commit_list_append(commit, next);
- p = *eol ? eol + 1 : eol;
- }
- if (!*todo_list)
- return error(_("No commits parsed."));
- return 0;
-}
-
-static void read_populate_todo(struct commit_list **todo_list,
- struct replay_opts *opts)
-{
- const char *todo_file = git_path(SEQ_TODO_FILE);
- struct strbuf buf = STRBUF_INIT;
- int fd, res;
-
- fd = open(todo_file, O_RDONLY);
- if (fd < 0)
- die_errno(_("Could not open %s"), todo_file);
- if (strbuf_read(&buf, fd, 0) < 0) {
- close(fd);
- strbuf_release(&buf);
- die(_("Could not read %s."), todo_file);
- }
- close(fd);
-
- res = parse_insn_buffer(buf.buf, todo_list, opts);
- strbuf_release(&buf);
- if (res)
- die(_("Unusable instruction sheet: %s"), todo_file);
-}
-
-static int populate_opts_cb(const char *key, const char *value, void *data)
-{
- struct replay_opts *opts = data;
- int error_flag = 1;
-
- if (!value)
- error_flag = 0;
- else if (!strcmp(key, "options.no-commit"))
- opts->no_commit = git_config_bool_or_int(key, value, &error_flag);
- else if (!strcmp(key, "options.edit"))
- opts->edit = git_config_bool_or_int(key, value, &error_flag);
- else if (!strcmp(key, "options.signoff"))
- opts->signoff = git_config_bool_or_int(key, value, &error_flag);
- else if (!strcmp(key, "options.record-origin"))
- opts->record_origin = git_config_bool_or_int(key, value, &error_flag);
- else if (!strcmp(key, "options.allow-ff"))
- opts->allow_ff = git_config_bool_or_int(key, value, &error_flag);
- else if (!strcmp(key, "options.mainline"))
- opts->mainline = git_config_int(key, value);
- else if (!strcmp(key, "options.strategy"))
- git_config_string(&opts->strategy, key, value);
- else if (!strcmp(key, "options.strategy-option")) {
- ALLOC_GROW(opts->xopts, opts->xopts_nr + 1, opts->xopts_alloc);
- opts->xopts[opts->xopts_nr++] = xstrdup(value);
- } else
- return error(_("Invalid key: %s"), key);
-
- if (!error_flag)
- return error(_("Invalid value for %s: %s"), key, value);
-
- return 0;
-}
-
-static void read_populate_opts(struct replay_opts **opts_ptr)
-{
- const char *opts_file = git_path(SEQ_OPTS_FILE);
-
- if (!file_exists(opts_file))
- return;
- if (git_config_from_file(populate_opts_cb, opts_file, *opts_ptr) < 0)
- die(_("Malformed options sheet: %s"), opts_file);
-}
-
-static void walk_revs_populate_todo(struct commit_list **todo_list,
- struct replay_opts *opts)
-{
- struct commit *commit;
- struct commit_list **next;
-
- prepare_revs(opts);
-
- next = todo_list;
- while ((commit = get_revision(opts->revs)))
- next = commit_list_append(commit, next);
-}
-
-static int create_seq_dir(void)
-{
- const char *seq_dir = git_path(SEQ_DIR);
-
- if (file_exists(seq_dir)) {
- error(_("a cherry-pick or revert is already in progress"));
- advise(_("try \"git cherry-pick (--continue | --quit | --abort)\""));
- return -1;
- }
- else if (mkdir(seq_dir, 0777) < 0)
- die_errno(_("Could not create sequencer directory %s"), seq_dir);
- return 0;
-}
-
-static void save_head(const char *head)
-{
- const char *head_file = git_path(SEQ_HEAD_FILE);
- static struct lock_file head_lock;
- struct strbuf buf = STRBUF_INIT;
- int fd;
-
- fd = hold_lock_file_for_update(&head_lock, head_file, LOCK_DIE_ON_ERROR);
- strbuf_addf(&buf, "%s\n", head);
- if (write_in_full(fd, buf.buf, buf.len) < 0)
- die_errno(_("Could not write to %s"), head_file);
- if (commit_lock_file(&head_lock) < 0)
- die(_("Error wrapping up %s."), head_file);
-}
-
-static int reset_for_rollback(const unsigned char *sha1)
-{
- const char *argv[4]; /* reset --merge <arg> + NULL */
- argv[0] = "reset";
- argv[1] = "--merge";
- argv[2] = sha1_to_hex(sha1);
- argv[3] = NULL;
- return run_command_v_opt(argv, RUN_GIT_CMD);
-}
-
-static int rollback_single_pick(void)
-{
- unsigned char head_sha1[20];
-
- if (!file_exists(git_path("CHERRY_PICK_HEAD")) &&
- !file_exists(git_path("REVERT_HEAD")))
- return error(_("no cherry-pick or revert in progress"));
- if (read_ref_full("HEAD", head_sha1, 0, NULL))
- return error(_("cannot resolve HEAD"));
- if (is_null_sha1(head_sha1))
- return error(_("cannot abort from a branch yet to be born"));
- return reset_for_rollback(head_sha1);
-}
-
-static int sequencer_rollback(struct replay_opts *opts)
-{
- const char *filename;
- FILE *f;
- unsigned char sha1[20];
- struct strbuf buf = STRBUF_INIT;
-
- filename = git_path(SEQ_HEAD_FILE);
- f = fopen(filename, "r");
- if (!f && errno == ENOENT) {
- /*
- * There is no multiple-cherry-pick in progress.
- * If CHERRY_PICK_HEAD or REVERT_HEAD indicates
- * a single-cherry-pick in progress, abort that.
- */
- return rollback_single_pick();
- }
- if (!f)
- return error(_("cannot open %s: %s"), filename,
- strerror(errno));
- if (strbuf_getline(&buf, f, '\n')) {
- error(_("cannot read %s: %s"), filename, ferror(f) ?
- strerror(errno) : _("unexpected end of file"));
- fclose(f);
- goto fail;
- }
- fclose(f);
- if (get_sha1_hex(buf.buf, sha1) || buf.buf[40] != '\0') {
- error(_("stored pre-cherry-pick HEAD file '%s' is corrupt"),
- filename);
- goto fail;
- }
- if (reset_for_rollback(sha1))
- goto fail;
- remove_sequencer_state();
- strbuf_release(&buf);
- return 0;
-fail:
- strbuf_release(&buf);
- return -1;
-}
-
-static void save_todo(struct commit_list *todo_list, struct replay_opts *opts)
-{
- const char *todo_file = git_path(SEQ_TODO_FILE);
- static struct lock_file todo_lock;
- struct strbuf buf = STRBUF_INIT;
- int fd;
-
- fd = hold_lock_file_for_update(&todo_lock, todo_file, LOCK_DIE_ON_ERROR);
- if (format_todo(&buf, todo_list, opts) < 0)
- die(_("Could not format %s."), todo_file);
- if (write_in_full(fd, buf.buf, buf.len) < 0) {
- strbuf_release(&buf);
- die_errno(_("Could not write to %s"), todo_file);
- }
- if (commit_lock_file(&todo_lock) < 0) {
- strbuf_release(&buf);
- die(_("Error wrapping up %s."), todo_file);
- }
- strbuf_release(&buf);
-}
-
-static void save_opts(struct replay_opts *opts)
-{
- const char *opts_file = git_path(SEQ_OPTS_FILE);
-
- if (opts->no_commit)
- git_config_set_in_file(opts_file, "options.no-commit", "true");
- if (opts->edit)
- git_config_set_in_file(opts_file, "options.edit", "true");
- if (opts->signoff)
- git_config_set_in_file(opts_file, "options.signoff", "true");
- if (opts->record_origin)
- git_config_set_in_file(opts_file, "options.record-origin", "true");
- if (opts->allow_ff)
- git_config_set_in_file(opts_file, "options.allow-ff", "true");
- if (opts->mainline) {
- struct strbuf buf = STRBUF_INIT;
- strbuf_addf(&buf, "%d", opts->mainline);
- git_config_set_in_file(opts_file, "options.mainline", buf.buf);
- strbuf_release(&buf);
- }
- if (opts->strategy)
- git_config_set_in_file(opts_file, "options.strategy", opts->strategy);
- if (opts->xopts) {
- int i;
- for (i = 0; i < opts->xopts_nr; i++)
- git_config_set_multivar_in_file(opts_file,
- "options.strategy-option",
- opts->xopts[i], "^$", 0);
- }
-}
-
-static int pick_commits(struct commit_list *todo_list, struct replay_opts *opts)
-{
- struct commit_list *cur;
- int res;
-
- setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
- if (opts->allow_ff)
- assert(!(opts->signoff || opts->no_commit ||
- opts->record_origin || opts->edit));
- read_and_refresh_cache(opts);
-
- for (cur = todo_list; cur; cur = cur->next) {
- save_todo(cur, opts);
- res = do_pick_commit(cur->item, opts);
- if (res)
- return res;
- }
-
- /*
- * Sequence of picks finished successfully; cleanup by
- * removing the .git/sequencer directory
- */
- remove_sequencer_state();
- return 0;
-}
-
-static int continue_single_pick(void)
-{
- const char *argv[] = { "commit", NULL };
-
- if (!file_exists(git_path("CHERRY_PICK_HEAD")) &&
- !file_exists(git_path("REVERT_HEAD")))
- return error(_("no cherry-pick or revert in progress"));
- return run_command_v_opt(argv, RUN_GIT_CMD);
-}
-
-static int sequencer_continue(struct replay_opts *opts)
-{
- struct commit_list *todo_list = NULL;
-
- if (!file_exists(git_path(SEQ_TODO_FILE)))
- return continue_single_pick();
- read_populate_opts(&opts);
- read_populate_todo(&todo_list, opts);
-
- /* Verify that the conflict has been resolved */
- if (file_exists(git_path("CHERRY_PICK_HEAD")) ||
- file_exists(git_path("REVERT_HEAD"))) {
- int ret = continue_single_pick();
- if (ret)
- return ret;
- }
- if (index_differs_from("HEAD", 0))
- return error_dirty_index(opts);
- todo_list = todo_list->next;
- return pick_commits(todo_list, opts);
-}
-
-static int single_pick(struct commit *cmit, struct replay_opts *opts)
-{
- setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
- return do_pick_commit(cmit, opts);
-}
-
-static int pick_revisions(struct replay_opts *opts)
-{
- struct commit_list *todo_list = NULL;
- unsigned char sha1[20];
-
- if (opts->subcommand == REPLAY_NONE)
- assert(opts->revs);
-
- read_and_refresh_cache(opts);
-
- /*
- * Decide what to do depending on the arguments; a fresh
- * cherry-pick should be handled differently from an existing
- * one that is being continued
- */
- if (opts->subcommand == REPLAY_REMOVE_STATE) {
- remove_sequencer_state();
- return 0;
- }
- if (opts->subcommand == REPLAY_ROLLBACK)
- return sequencer_rollback(opts);
- if (opts->subcommand == REPLAY_CONTINUE)
- return sequencer_continue(opts);
-
- /*
- * If we were called as "git cherry-pick <commit>", just
- * cherry-pick/revert it, set CHERRY_PICK_HEAD /
- * REVERT_HEAD, and don't touch the sequencer state.
- * This means it is possible to cherry-pick in the middle
- * of a cherry-pick sequence.
- */
- if (opts->revs->cmdline.nr == 1 &&
- opts->revs->cmdline.rev->whence == REV_CMD_REV &&
- opts->revs->no_walk &&
- !opts->revs->cmdline.rev->flags) {
- struct commit *cmit;
- if (prepare_revision_walk(opts->revs))
- die(_("revision walk setup failed"));
- cmit = get_revision(opts->revs);
- if (!cmit || get_revision(opts->revs))
- die("BUG: expected exactly one commit from walk");
- return single_pick(cmit, opts);
- }
-
- /*
- * Start a new cherry-pick/ revert sequence; but
- * first, make sure that an existing one isn't in
- * progress
- */
-
- walk_revs_populate_todo(&todo_list, opts);
- if (create_seq_dir() < 0)
- return -1;
- if (get_sha1("HEAD", sha1)) {
- if (opts->action == REVERT)
- return error(_("Can't revert as initial commit"));
- return error(_("Can't cherry-pick into empty head"));
- }
- save_head(sha1_to_hex(sha1));
- save_opts(opts);
- return pick_commits(todo_list, opts);
-}
-
int cmd_revert(int argc, const char **argv, const char *prefix)
{
struct replay_opts opts;
@@ -1141,10 +201,10 @@ int cmd_revert(int argc, const char **argv, const char *prefix)
memset(&opts, 0, sizeof(opts));
if (isatty(0))
opts.edit = 1;
- opts.action = REVERT;
+ opts.action = REPLAY_REVERT;
git_config(git_default_config, NULL);
parse_args(argc, argv, &opts);
- res = pick_revisions(&opts);
+ res = sequencer_pick_revisions(&opts);
if (res < 0)
die(_("revert failed"));
return res;
@@ -1156,10 +216,10 @@ int cmd_cherry_pick(int argc, const char **argv, const char *prefix)
int res;
memset(&opts, 0, sizeof(opts));
- opts.action = CHERRY_PICK;
+ opts.action = REPLAY_PICK;
git_config(git_default_config, NULL);
parse_args(argc, argv, &opts);
- res = pick_revisions(&opts);
+ res = sequencer_pick_revisions(&opts);
if (res < 0)
die(_("cherry-pick failed"));
return res;
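
Nearly everything removed from builtin/revert.c above moves into the new sequencer module (note the added #include "sequencer.h" and the calls to sequencer_pick_revisions()); the builtin now only fills a struct replay_opts with the renamed REPLAY_REVERT/REPLAY_PICK actions and hands off. A toy sketch of that calling convention, using a mock sequencer entry point so it runs standalone:

#include <stdio.h>
#include <string.h>

/* Mock of the sequencer entry point; the real one lives in sequencer.c
 * and drives the pick/revert machinery removed from this file. */
enum replay_action { REPLAY_REVERT, REPLAY_PICK };

struct replay_opts {
	enum replay_action action;
	int edit, no_commit;
};

static int sequencer_pick_revisions(struct replay_opts *opts)
{
	printf("would %s with edit=%d no_commit=%d\n",
	       opts->action == REPLAY_REVERT ? "revert" : "cherry-pick",
	       opts->edit, opts->no_commit);
	return 0;
}

int main(void)
{
	struct replay_opts opts;

	memset(&opts, 0, sizeof(opts));
	opts.action = REPLAY_PICK;	/* as cmd_cherry_pick() now sets it */
	return sequencer_pick_revisions(&opts);
}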
diff --git a/builtin/send-pack.c b/builtin/send-pack.c
index cd1115ffc6..71f258ef6e 100644
--- a/builtin/send-pack.c
+++ b/builtin/send-pack.c
@@ -263,6 +263,8 @@ int send_pack(struct send_pack_args *args,
args->use_ofs_delta = 1;
if (server_supports("side-band-64k"))
use_sideband = 1;
+ if (!server_supports("quiet"))
+ args->quiet = 0;
if (!remote_refs) {
fprintf(stderr, "No refs in common and none specified; doing nothing.\n"
@@ -301,11 +303,12 @@ int send_pack(struct send_pack_args *args,
char *old_hex = sha1_to_hex(ref->old_sha1);
char *new_hex = sha1_to_hex(ref->new_sha1);
- if (!cmds_sent && (status_report || use_sideband)) {
- packet_buf_write(&req_buf, "%s %s %s%c%s%s",
+ if (!cmds_sent && (status_report || use_sideband || args->quiet)) {
+ packet_buf_write(&req_buf, "%s %s %s%c%s%s%s",
old_hex, new_hex, ref->name, 0,
status_report ? " report-status" : "",
- use_sideband ? " side-band-64k" : "");
+ use_sideband ? " side-band-64k" : "",
+ args->quiet ? " quiet" : "");
}
else
packet_buf_write(&req_buf, "%s %s %s",
@@ -439,6 +442,10 @@ int cmd_send_pack(int argc, const char **argv, const char *prefix)
args.force_update = 1;
continue;
}
+ if (!strcmp(arg, "--quiet")) {
+ args.quiet = 1;
+ continue;
+ }
if (!strcmp(arg, "--verbose")) {
args.verbose = 1;
continue;
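
On the sending side, --quiet is only forwarded when the server advertised the matching capability (the server_supports() check above clears args->quiet otherwise), and the request rides on the first update command next to report-status and side-band-64k. A tiny sketch of composing that capability suffix; the flags are placeholders, and the literal \0 in the output only marks where the NUL separator sits in the real packet line:

#include <stdio.h>

int main(void)
{
	/* Placeholder flags for the negotiated state. */
	int status_report = 1, use_sideband = 1, quiet = 1;

	/* "\0" is printed literally here to mark the NUL separator. */
	printf("<old-sha1> <new-sha1> refs/heads/master\\0%s%s%s\n",
	       status_report ? " report-status" : "",
	       use_sideband ? " side-band-64k" : "",
	       quiet ? " quiet" : "");
	return 0;
}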
diff --git a/builtin/tag.c b/builtin/tag.c
index 31f02e80f6..03df16ac6e 100644
--- a/builtin/tag.c
+++ b/builtin/tag.c
@@ -83,18 +83,51 @@ static int contains(struct commit *candidate, const struct commit_list *want)
return contains_recurse(candidate, want);
}
+static void show_tag_lines(const unsigned char *sha1, int lines)
+{
+ int i;
+ unsigned long size;
+ enum object_type type;
+ char *buf, *sp, *eol;
+ size_t len;
+
+ buf = read_sha1_file(sha1, &type, &size);
+ if (!buf)
+ die_errno("unable to read object %s", sha1_to_hex(sha1));
+ if (type != OBJ_COMMIT && type != OBJ_TAG)
+ goto free_return;
+ if (!size)
+ die("an empty %s object %s?",
+ typename(type), sha1_to_hex(sha1));
+
+ /* skip header */
+ sp = strstr(buf, "\n\n");
+ if (!sp)
+ goto free_return;
+
+ /* only take up to "lines" lines, and strip the signature from a tag */
+ if (type == OBJ_TAG)
+ size = parse_signature(buf, size);
+ for (i = 0, sp += 2; i < lines && sp < buf + size; i++) {
+ if (i)
+ printf("\n ");
+ eol = memchr(sp, '\n', size - (sp - buf));
+ len = eol ? eol - sp : size - (sp - buf);
+ fwrite(sp, len, 1, stdout);
+ if (!eol)
+ break;
+ sp = eol + 1;
+ }
+free_return:
+ free(buf);
+}
+
static int show_reference(const char *refname, const unsigned char *sha1,
int flag, void *cb_data)
{
struct tag_filter *filter = cb_data;
if (match_pattern(filter->patterns, refname)) {
- int i;
- unsigned long size;
- enum object_type type;
- char *buf, *sp, *eol;
- size_t len;
-
if (filter->with_commit) {
struct commit *commit;
@@ -110,33 +143,8 @@ static int show_reference(const char *refname, const unsigned char *sha1,
return 0;
}
printf("%-15s ", refname);
-
- buf = read_sha1_file(sha1, &type, &size);
- if (!buf || !size)
- return 0;
-
- /* skip header */
- sp = strstr(buf, "\n\n");
- if (!sp) {
- free(buf);
- return 0;
- }
- /* only take up to "lines" lines, and strip the signature */
- size = parse_signature(buf, size);
- for (i = 0, sp += 2;
- i < filter->lines && sp < buf + size;
- i++) {
- if (i)
- printf("\n ");
- eol = memchr(sp, '\n', size - (sp - buf));
- len = eol ? eol - sp : size - (sp - buf);
- fwrite(sp, len, 1, stdout);
- if (!eol)
- break;
- sp = eol + 1;
- }
+ show_tag_lines(sha1, filter->lines);
putchar('\n');
- free(buf);
}
return 0;
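
The tag -n<N> display loop is factored out into show_tag_lines(), which additionally tolerates refs that point at commits (the OBJ_COMMIT/OBJ_TAG check) and strips a signature only when the object really is a tag. The core of it, skipping the header up to the blank line and printing at most N body lines with an indented continuation, looks roughly like this standalone sketch:

#include <stdio.h>
#include <string.h>

/* Standalone sketch of the "first N body lines, indented continuation"
 * loop factored into show_tag_lines() above. */
static void show_lines(const char *buf, int lines)
{
	const char *sp = strstr(buf, "\n\n");	/* skip the header */
	const char *end = buf + strlen(buf);
	int i;

	if (!sp)
		return;
	for (i = 0, sp += 2; i < lines && sp < end; i++) {
		const char *eol = memchr(sp, '\n', end - sp);
		size_t len = eol ? (size_t)(eol - sp) : (size_t)(end - sp);

		if (i)
			printf("\n    ");
		fwrite(sp, len, 1, stdout);
		if (!eol)
			break;
		sp = eol + 1;
	}
	putchar('\n');
}

int main(void)
{
	show_lines("header: x\n\nfirst line\nsecond line\nthird line\n", 2);
	return 0;
}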
diff --git a/cache-tree.c b/cache-tree.c
index c1eacafd19..28ed6574a2 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -159,7 +159,7 @@ static int verify_cache(struct cache_entry **cache,
funny = 0;
for (i = 0; i < entries; i++) {
struct cache_entry *ce = cache[i];
- if (ce_stage(ce) || (ce->ce_flags & CE_INTENT_TO_ADD)) {
+ if (ce_stage(ce)) {
if (silent)
return -1;
if (10 < ++funny) {
@@ -339,8 +339,8 @@ static int update_one(struct cache_tree *it,
mode, sha1_to_hex(sha1), entlen+baselen, path);
}
- if (ce->ce_flags & CE_REMOVE)
- continue; /* entry being removed */
+ if (ce->ce_flags & (CE_REMOVE | CE_INTENT_TO_ADD))
+ continue; /* entry being removed or placeholder */
strbuf_grow(&buffer, entlen + 100);
strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
diff --git a/cache.h b/cache.h
index 10afd71d43..9bd8c2d06c 100644
--- a/cache.h
+++ b/cache.h
@@ -1037,6 +1037,7 @@ struct extra_have_objects {
};
extern struct ref **get_remote_heads(int in, struct ref **list, unsigned int flags, struct extra_have_objects *);
extern int server_supports(const char *feature);
+extern const char *parse_feature_request(const char *features, const char *feature);
extern struct packed_git *parse_pack_index(unsigned char *sha1, const char *idx_path);
diff --git a/commit.c b/commit.c
index 35af4988f0..4b39c19123 100644
--- a/commit.c
+++ b/commit.c
@@ -422,7 +422,8 @@ struct commit *pop_most_recent_commit(struct commit_list **list,
return ret;
}
-void clear_commit_marks(struct commit *commit, unsigned int mark)
+static void clear_commit_marks_1(struct commit_list **plist,
+ struct commit *commit, unsigned int mark)
{
while (commit) {
struct commit_list *parents;
@@ -437,12 +438,20 @@ void clear_commit_marks(struct commit *commit, unsigned int mark)
return;
while ((parents = parents->next))
- clear_commit_marks(parents->item, mark);
+ commit_list_insert(parents->item, plist);
commit = commit->parents->item;
}
}
+void clear_commit_marks(struct commit *commit, unsigned int mark)
+{
+ struct commit_list *list = NULL;
+ commit_list_insert(commit, &list);
+ while (list)
+ clear_commit_marks_1(&list, pop_commit(&list), mark);
+}
+
void clear_commit_marks_for_object_array(struct object_array *a, unsigned mark)
{
struct object *object;
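
clear_commit_marks() used to recurse into every extra parent of a merge, which can exhaust the stack on histories with long chains of merges; the rewrite keeps walking the first-parent chain but pushes the other parents onto a list and drains it iteratively. A simplified, self-contained sketch of the same work-list pattern on a toy three-node graph (it queues every parent instead of special-casing the first, but the shape is the same):

#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned flags;
	int nparents;
	struct node **parents;
};

struct list {
	struct node *item;
	struct list *next;
};

static void push(struct node *n, struct list **head)
{
	struct list *e = malloc(sizeof(*e));
	e->item = n;
	e->next = *head;
	*head = e;
}

static struct node *pop(struct list **head)
{
	struct list *e = *head;
	struct node *n = e->item;
	*head = e->next;
	free(e);
	return n;
}

/* Clear a flag bit across the ancestry without recursion. */
static void clear_marks(struct node *start, unsigned mark)
{
	struct list *work = NULL;

	push(start, &work);
	while (work) {
		struct node *n = pop(&work);
		int i;

		if (!(n->flags & mark))
			continue;	/* already cleared via another path */
		n->flags &= ~mark;
		for (i = 0; i < n->nparents; i++)
			push(n->parents[i], &work);
	}
}

int main(void)
{
	struct node a = { 1, 0, NULL };
	struct node *pb[] = { &a };
	struct node b = { 1, 1, pb };
	struct node *pc[] = { &a, &b };
	struct node c = { 1, 2, pc };	/* a merge with two parents */

	clear_marks(&c, 1);
	printf("flags: a=%u b=%u c=%u\n", a.flags, b.flags, c.flags);
	return 0;
}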
diff --git a/compat/inet_ntop.c b/compat/inet_ntop.c
index 60b5a1d0f8..90b7cc45f3 100644
--- a/compat/inet_ntop.c
+++ b/compat/inet_ntop.c
@@ -15,14 +15,8 @@
* SOFTWARE.
*/
-#include <errno.h>
-#include <sys/types.h>
-
#include "../git-compat-util.h"
-#include <stdio.h>
-#include <string.h>
-
#ifndef NS_INADDRSZ
#define NS_INADDRSZ 4
#endif
diff --git a/compat/inet_pton.c b/compat/inet_pton.c
index 2ec995e63d..2b9a0a4e22 100644
--- a/compat/inet_pton.c
+++ b/compat/inet_pton.c
@@ -15,14 +15,8 @@
* WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <errno.h>
-#include <sys/types.h>
-
#include "../git-compat-util.h"
-#include <stdio.h>
-#include <string.h>
-
#ifndef NS_INT16SZ
#define NS_INT16SZ 2
#endif
diff --git a/connect.c b/connect.c
index 2ea5c3c0fb..912cddeea8 100644
--- a/connect.c
+++ b/connect.c
@@ -101,8 +101,27 @@ struct ref **get_remote_heads(int in, struct ref **list,
int server_supports(const char *feature)
{
- return server_capabilities &&
- strstr(server_capabilities, feature) != NULL;
+ return !!parse_feature_request(server_capabilities, feature);
+}
+
+const char *parse_feature_request(const char *feature_list, const char *feature)
+{
+ int len;
+
+ if (!feature_list)
+ return NULL;
+
+ len = strlen(feature);
+ while (*feature_list) {
+ const char *found = strstr(feature_list, feature);
+ if (!found)
+ return NULL;
+ if ((feature_list == found || isspace(found[-1])) &&
+ (!found[len] || isspace(found[len]) || found[len] == '='))
+ return found;
+ feature_list = found + 1;
+ }
+ return NULL;
}
enum protocol {
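
server_supports() used to do a bare strstr() over the capability string, so a feature name that happens to be a substring of another capability (or of a value) could match by accident; parse_feature_request() only accepts a hit that is bounded by the start of the list or whitespace on the left, and by whitespace, end of string or '=' on the right. A standalone re-implementation to show the difference:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Standalone version of the boundary-checked lookup above. */
static const char *find_feature(const char *list, const char *feature)
{
	size_t len = strlen(feature);

	if (!list)
		return NULL;
	while (*list) {
		const char *found = strstr(list, feature);
		if (!found)
			return NULL;
		if ((found == list || isspace((unsigned char)found[-1])) &&
		    (!found[len] || isspace((unsigned char)found[len]) ||
		     found[len] == '='))
			return found;
		list = found + 1;	/* substring hit; keep looking */
	}
	return NULL;
}

int main(void)
{
	const char *caps = "report-status delete-refs side-band-64k quiet";

	/* "band" and "status" occur only as substrings of capabilities. */
	printf("quiet:  %s\n", find_feature(caps, "quiet") ? "yes" : "no");
	printf("band:   %s\n", find_feature(caps, "band") ? "yes" : "no");
	printf("status: %s\n", find_feature(caps, "status") ? "yes" : "no");
	return 0;
}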
diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash
index 1496c6dc05..d7367e9faa 100755
--- a/contrib/completion/git-completion.bash
+++ b/contrib/completion/git-completion.bash
@@ -495,11 +495,8 @@ fi
# 4: A suffix to be appended to each possible completion word (optional).
__gitcomp ()
{
- local cur_="$cur"
+ local cur_="${3-$cur}"
- if [ $# -gt 2 ]; then
- cur_="$3"
- fi
case "$cur_" in
--*=)
COMPREPLY=()
@@ -524,18 +521,8 @@ __gitcomp ()
# appended.
__gitcomp_nl ()
{
- local s=$'\n' IFS=' '$'\t'$'\n'
- local cur_="$cur" suffix=" "
-
- if [ $# -gt 2 ]; then
- cur_="$3"
- if [ $# -gt 3 ]; then
- suffix="$4"
- fi
- fi
-
- IFS=$s
- COMPREPLY=($(compgen -P "${2-}" -S "$suffix" -W "$1" -- "$cur_"))
+ local IFS=$'\n'
+ COMPREPLY=($(compgen -P "${2-}" -S "${4- }" -W "$1" -- "${3-$cur}"))
}
__git_heads ()
@@ -643,13 +630,8 @@ __git_refs_remotes ()
__git_remotes ()
{
- local i ngoff IFS=$'\n' d="$(__gitdir)"
- __git_shopt -q nullglob || ngoff=1
- __git_shopt -s nullglob
- for i in "$d/remotes"/*; do
- echo ${i#$d/remotes/}
- done
- [ "$ngoff" ] && __git_shopt -u nullglob
+ local i IFS=$'\n' d="$(__gitdir)"
+ test -d "$d/remotes" && ls -1 "$d/remotes"
for i in $(git --git-dir="$d" config --get-regexp 'remote\..*\.url' 2>/dev/null); do
i="${i#remote.}"
echo "${i/.url*/}"
@@ -676,7 +658,8 @@ __git_merge_strategies=
# is needed.
__git_compute_merge_strategies ()
{
- : ${__git_merge_strategies:=$(__git_list_merge_strategies)}
+ test -n "$__git_merge_strategies" ||
+ __git_merge_strategies=$(__git_list_merge_strategies)
}
__git_complete_revlist_file ()
@@ -854,7 +837,8 @@ __git_list_all_commands ()
__git_all_commands=
__git_compute_all_commands ()
{
- : ${__git_all_commands:=$(__git_list_all_commands)}
+ test -n "$__git_all_commands" ||
+ __git_all_commands=$(__git_list_all_commands)
}
__git_list_porcelain_commands ()
@@ -947,7 +931,8 @@ __git_porcelain_commands=
__git_compute_porcelain_commands ()
{
__git_compute_all_commands
- : ${__git_porcelain_commands:=$(__git_list_porcelain_commands)}
+ test -n "$__git_porcelain_commands" ||
+ __git_porcelain_commands=$(__git_list_porcelain_commands)
}
__git_pretty_aliases ()
@@ -1152,7 +1137,7 @@ _git_branch ()
__gitcomp "
--color --no-color --verbose --abbrev= --no-abbrev
--track --no-track --contains --merged --no-merged
- --set-upstream
+ --set-upstream --edit-description
"
;;
*)
@@ -1622,7 +1607,7 @@ _git_log ()
__git_merge_options="
--no-commit --no-stat --log --no-log --squash --strategy
- --commit --stat --no-squash --ff --no-ff --ff-only
+ --commit --stat --no-squash --ff --no-ff --ff-only --edit --no-edit
"
_git_merge ()
@@ -2733,33 +2718,3 @@ if [ Cygwin = "$(uname -o 2>/dev/null)" ]; then
complete -o bashdefault -o default -o nospace -F _git git.exe 2>/dev/null \
|| complete -o default -o nospace -F _git git.exe
fi
-
-if [[ -n ${ZSH_VERSION-} ]]; then
- __git_shopt () {
- local option
- if [ $# -ne 2 ]; then
- echo "USAGE: $0 (-q|-s|-u) <option>" >&2
- return 1
- fi
- case "$2" in
- nullglob)
- option="$2"
- ;;
- *)
- echo "$0: invalid option: $2" >&2
- return 1
- esac
- case "$1" in
- -q) setopt | grep -q "$option" ;;
- -u) unsetopt "$option" ;;
- -s) setopt "$option" ;;
- *)
- echo "$0: invalid flag: $1" >&2
- return 1
- esac
- }
-else
- __git_shopt () {
- shopt "$@"
- }
-fi
diff --git a/contrib/fast-import/git-p4 b/contrib/fast-import/git-p4
index 3e1aa276cf..a78d9c5493 100755
--- a/contrib/fast-import/git-p4
+++ b/contrib/fast-import/git-p4
@@ -563,6 +563,26 @@ class Command:
class P4UserMap:
def __init__(self):
self.userMapFromPerforceServer = False
+ self.myP4UserId = None
+
+ def p4UserId(self):
+ if self.myP4UserId:
+ return self.myP4UserId
+
+ results = p4CmdList("user -o")
+ for r in results:
+ if r.has_key('User'):
+ self.myP4UserId = r['User']
+ return r['User']
+ die("Could not find your p4 user id")
+
+ def p4UserIsMe(self, p4User):
+ # return True if the given p4 user is actually me
+ me = self.p4UserId()
+ if not p4User or p4User != me:
+ return False
+ else:
+ return True
def getUserCacheFilename(self):
home = os.environ.get("HOME", os.environ.get("USERPROFILE"))
@@ -700,7 +720,6 @@ class P4Submit(Command, P4UserMap):
self.verbose = False
self.preserveUser = gitConfig("git-p4.preserveUser").lower() == "true"
self.isWindows = (platform.system() == "Windows")
- self.myP4UserId = None
def check(self):
if len(p4CmdList("opened ...")) > 0:
@@ -799,7 +818,7 @@ class P4Submit(Command, P4UserMap):
def canChangeChangelists(self):
# check to see if we have p4 admin or super-user permissions, either of
# which are required to modify changelists.
- results = p4CmdList("protects %s" % self.depotPath)
+ results = p4CmdList(["protects", self.depotPath])
for r in results:
if r.has_key('perm'):
if r['perm'] == 'admin':
@@ -808,25 +827,6 @@ class P4Submit(Command, P4UserMap):
return 1
return 0
- def p4UserId(self):
- if self.myP4UserId:
- return self.myP4UserId
-
- results = p4CmdList("user -o")
- for r in results:
- if r.has_key('User'):
- self.myP4UserId = r['User']
- return r['User']
- die("Could not find your p4 user id")
-
- def p4UserIsMe(self, p4User):
- # return True if the given p4 user is actually me
- me = self.p4UserId()
- if not p4User or p4User != me:
- return False
- else:
- return True
-
def prepareSubmitTemplate(self):
# remove lines in the Files section that show changes to files outside the depot path we're committing into
template = ""
@@ -1207,8 +1207,8 @@ class View(object):
die("Can't handle * wildcards in view: %s" % self.path)
triple_dot_index = self.path.find("...")
if triple_dot_index >= 0:
- if not self.path.endswith("..."):
- die("Can handle ... wildcard only at end of path: %s" %
+ if triple_dot_index != len(self.path) - 3:
+ die("Can handle only single ... wildcard, at end: %s" %
self.path)
self.ends_triple_dot = True
@@ -1263,7 +1263,7 @@ class View(object):
if self.exclude:
c = "-"
return "View.Mapping: %s%s -> %s" % \
- (c, self.depot_side, self.client_side)
+ (c, self.depot_side.path, self.client_side.path)
def map_depot_to_client(self, depot_path):
"""Calculate the client path if using this mapping on the
@@ -1363,7 +1363,8 @@ class View(object):
else:
# This mapping matched; no need to search any further.
# But, the mapping could be rejected if the client path
- # has already been claimed by an earlier mapping.
+ # has already been claimed by an earlier mapping (i.e.
+ # one later in the list, which we are walking backwards).
already_mapped_in_client = False
for f in paths_filled:
# this is View.Path.match
@@ -1429,6 +1430,8 @@ class P4Sync(Command, P4UserMap):
self.cloneExclude = []
self.useClientSpec = False
self.clientSpecDirs = None
+ self.tempBranches = []
+ self.tempBranchLocation = "git-p4-tmp"
if gitConfig("git-p4.syncFromOrigin") == "false":
self.syncWithOrigin = False
@@ -1450,6 +1453,14 @@ class P4Sync(Command, P4UserMap):
.replace("%25", "%")
return path
+ # Force a checkpoint in fast-import and wait for it to finish
+ def checkpoint(self):
+ self.gitStream.write("checkpoint\n\n")
+ self.gitStream.write("progress checkpoint\n\n")
+ out = self.gitOutput.readline()
+ if self.verbose:
+ print "checkpoint finished: " + out
+
def extractFilesFromCommit(self, commit):
self.cloneExclude = [re.sub(r"\.\.\.$", "", path)
for path in self.cloneExclude]
@@ -1664,6 +1675,12 @@ class P4Sync(Command, P4UserMap):
if self.stream_file.has_key('depotFile'):
self.streamOneP4File(self.stream_file, self.stream_contents)
+ def make_email(self, userid):
+ if userid in self.users:
+ return self.users[userid]
+ else:
+ return "%s <a@b>" % userid
+
def commit(self, details, files, branch, branchPrefixes, parent = ""):
epoch = details["time"]
author = details["user"]
@@ -1687,10 +1704,7 @@ class P4Sync(Command, P4UserMap):
committer = ""
if author not in self.users:
self.getUserMapFromPerforceServer()
- if author in self.users:
- committer = "%s %s %s" % (self.users[author], epoch, self.tz)
- else:
- committer = "%s <a@b> %s %s" % (author, epoch, self.tz)
+ committer = "%s %s %s" % (self.make_email(author), epoch, self.tz)
self.gitStream.write("committer %s\n" % committer)
@@ -1735,15 +1749,21 @@ class P4Sync(Command, P4UserMap):
self.gitStream.write("from %s\n" % branch)
owner = labelDetails["Owner"]
- tagger = ""
- if author in self.users:
- tagger = "%s %s %s" % (self.users[owner], epoch, self.tz)
+
+ # Try to use the owner of the p4 label, or failing that,
+ # the current p4 user id.
+ if owner:
+ email = self.make_email(owner)
else:
- tagger = "%s <a@b> %s %s" % (owner, epoch, self.tz)
+ email = self.make_email(self.p4UserId())
+ tagger = "%s %s %s" % (email, epoch, self.tz)
+
self.gitStream.write("tagger %s\n" % tagger)
- self.gitStream.write("data <<EOT\n")
- self.gitStream.write(labelDetails["Description"])
- self.gitStream.write("EOT\n\n")
+
+ description = labelDetails["Description"]
+ self.gitStream.write("data %d\n" % len(description))
+ self.gitStream.write(description)
+ self.gitStream.write("\n")
else:
if not self.silent:
@@ -1758,7 +1778,7 @@ class P4Sync(Command, P4UserMap):
def getLabels(self):
self.labels = {}
- l = p4CmdList("labels %s..." % ' '.join (self.depotPaths))
+ l = p4CmdList(["labels"] + ["%s..." % p for p in self.depotPaths])
if len(l) > 0 and not self.silent:
print "Finding files belonging to labels in %s" % `self.depotPaths`
@@ -1800,7 +1820,7 @@ class P4Sync(Command, P4UserMap):
command = "branches"
for info in p4CmdList(command):
- details = p4Cmd("branch -o %s" % info["branch"])
+ details = p4Cmd(["branch", "-o", info["branch"]])
viewIdx = 0
while details.has_key("View%s" % viewIdx):
paths = details["View%s" % viewIdx].split(" ")
@@ -1938,7 +1958,7 @@ class P4Sync(Command, P4UserMap):
sourceRef = self.gitRefForBranch(sourceBranch)
#print "source " + sourceBranch
- branchParentChange = int(p4Cmd("changes -m 1 %s...@1,%s" % (sourceDepotPath, firstChange))["change"])
+ branchParentChange = int(p4Cmd(["changes", "-m", "1", "%s...@1,%s" % (sourceDepotPath, firstChange)])["change"])
#print "branch parent: %s" % branchParentChange
gitParent = self.gitCommitByP4Change(sourceRef, branchParentChange)
if len(gitParent) > 0:
@@ -1948,10 +1968,24 @@ class P4Sync(Command, P4UserMap):
self.importChanges(changes)
return True
+ def searchParent(self, parent, branch, target):
+ parentFound = False
+ for blob in read_pipe_lines(["git", "rev-list", "--reverse", "--no-merges", parent]):
+ blob = blob.strip()
+ if len(read_pipe(["git", "diff-tree", blob, target])) == 0:
+ parentFound = True
+ if self.verbose:
+ print "Found parent of %s in commit %s" % (branch, blob)
+ break
+ if parentFound:
+ return blob
+ else:
+ return None
+
def importChanges(self, changes):
cnt = 1
for change in changes:
- description = p4Cmd("describe %s" % change)
+ description = p4Cmd(["describe", str(change)])
self.updateOptionDict(description)
if not self.silent:
@@ -2004,7 +2038,21 @@ class P4Sync(Command, P4UserMap):
parent = self.initialParents[branch]
del self.initialParents[branch]
- self.commit(description, filesForCommit, branch, [branchPrefix], parent)
+ blob = None
+ if len(parent) > 0:
+ tempBranch = os.path.join(self.tempBranchLocation, "%d" % (change))
+ if self.verbose:
+ print "Creating temporary branch: " + tempBranch
+ self.commit(description, filesForCommit, tempBranch, [branchPrefix])
+ self.tempBranches.append(tempBranch)
+ self.checkpoint()
+ blob = self.searchParent(parent, branch, tempBranch)
+ if blob:
+ self.commit(description, filesForCommit, branch, [branchPrefix], blob)
+ else:
+ if self.verbose:
+ print "Parent of %s not found. Committing into head of %s" % (branch, parent)
+ self.commit(description, filesForCommit, branch, [branchPrefix], parent)
else:
files = self.extractFilesFromCommit(description)
self.commit(description, files, self.branch, self.depotPaths,
@@ -2339,6 +2387,12 @@ class P4Sync(Command, P4UserMap):
self.gitOutput.close()
self.gitError.close()
+ # Cleanup temporary branches created during import
+ if self.tempBranches != []:
+ for branch in self.tempBranches:
+ read_pipe("git update-ref -d %s" % branch)
+ os.rmdir(os.path.join(os.environ.get("GIT_DIR", ".git"), self.tempBranchLocation))
+
return True
class P4Rebase(Command):
diff --git a/contrib/svn-fe/svn-fe.txt b/contrib/svn-fe/svn-fe.txt
index 72ffea0b3a..2dd27ceb0e 100644
--- a/contrib/svn-fe/svn-fe.txt
+++ b/contrib/svn-fe/svn-fe.txt
@@ -8,7 +8,10 @@ svn-fe - convert an SVN "dumpfile" to a fast-import stream
SYNOPSIS
--------
[verse]
-svnadmin dump --incremental REPO | svn-fe [url] | git fast-import
+mkfifo backchannel &&
+svnadmin dump --deltas REPO |
+ svn-fe [url] 3<backchannel |
+ git fast-import --cat-blob-fd=3 3>backchannel
DESCRIPTION
-----------
@@ -29,9 +32,6 @@ Subversion's repository dump format is documented in full in
Files in this format can be generated using the 'svnadmin dump' or
'svk admin dump' command.
-Dumps produced with 'svnadmin dump --deltas' (dumpfile format v3)
-are not supported.
-
OUTPUT FORMAT
-------------
The fast-import format is documented by the git-fast-import(1)
diff --git a/ctype.c b/ctype.c
index b5d856fd26..af722f957f 100644
--- a/ctype.c
+++ b/ctype.c
@@ -3,7 +3,7 @@
*
* No surprises, and works with signed and unsigned chars.
*/
-#include "cache.h"
+#include "git-compat-util.h"
enum {
S = GIT_SPACE,
diff --git a/daemon.c b/daemon.c
index 15ce918a21..ab21e66b2f 100644
--- a/daemon.c
+++ b/daemon.c
@@ -1086,6 +1086,8 @@ static int serve(struct string_list *listen_addr, int listen_port,
drop_privileges(cred);
+ loginfo("Ready to rumble");
+
return service_loop(&socklist);
}
@@ -1270,10 +1272,8 @@ int main(int argc, char **argv)
if (inetd_mode || serve_mode)
return execute();
- if (detach) {
+ if (detach)
daemonize();
- loginfo("Ready to rumble");
- }
else
sanitize_stdfds();
diff --git a/date.c b/date.c
index 353e0a5e53..a5055ca09d 100644
--- a/date.c
+++ b/date.c
@@ -597,6 +597,33 @@ static int date_string(unsigned long date, int offset, char *buf, int len)
return snprintf(buf, len, "%lu %c%02d%02d", date, sign, offset / 60, offset % 60);
}
+/*
+ * Parse a string like "0 +0000" as ancient timestamp near epoch, but
+ * only when it appears not as part of any other string.
+ */
+static int match_object_header_date(const char *date, unsigned long *timestamp, int *offset)
+{
+ char *end;
+ unsigned long stamp;
+ int ofs;
+
+ if (*date < '0' || '9' <= *date)
+ return -1;
+ stamp = strtoul(date, &end, 10);
+ if (*end != ' ' || stamp == ULONG_MAX || (end[1] != '+' && end[1] != '-'))
+ return -1;
+ date = end + 2;
+ ofs = strtol(date, &end, 10);
+ if ((*end != '\0' && (*end != '\n')) || end != date + 4)
+ return -1;
+ ofs = (ofs / 100) * 60 + (ofs % 100);
+ if (date[-1] == '-')
+ ofs = -ofs;
+ *timestamp = stamp;
+ *offset = ofs;
+ return 0;
+}
+
/* Gr. strptime is crap for this; it doesn't have a way to require RFC2822
(i.e. English) day/month names, and it doesn't work correctly with %z. */
int parse_date_basic(const char *date, unsigned long *timestamp, int *offset)
@@ -622,6 +649,9 @@ int parse_date_basic(const char *date, unsigned long *timestamp, int *offset)
*offset = -1;
tm_gmt = 0;
+ if (*date == '@' &&
+ !match_object_header_date(date + 1, timestamp, offset))
+ return 0; /* success */
for (;;) {
int match = 0;
unsigned char c = *date;
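
match_object_header_date() accepts the raw timestamp form stored in commit and tag headers, seconds since the epoch followed by a ±hhmm zone, whenever the string is prefixed with '@' (the git-sh-setup.sh hunk further down adds exactly that prefix when extracting GIT_AUTHOR_DATE). A standalone parser along the same lines, converting the offset to minutes as above:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse "<epoch-seconds> <+|-hhmm>", e.g. the part after '@'. */
static int parse_header_date(const char *date, unsigned long *timestamp,
			     int *offset)
{
	char *end;
	unsigned long stamp;
	int ofs;

	if (*date < '0' || '9' < *date)
		return -1;
	stamp = strtoul(date, &end, 10);
	if (*end != ' ' || stamp == ULONG_MAX ||
	    (end[1] != '+' && end[1] != '-'))
		return -1;
	date = end + 2;
	ofs = strtol(date, &end, 10);
	if ((*end != '\0' && *end != '\n') || end != date + 4)
		return -1;
	ofs = (ofs / 100) * 60 + (ofs % 100);	/* hhmm -> minutes */
	if (date[-1] == '-')
		ofs = -ofs;
	*timestamp = stamp;
	*offset = ofs;
	return 0;
}

int main(void)
{
	unsigned long t;
	int off;

	if (!parse_header_date("1330000000 +0530", &t, &off))
		printf("timestamp=%lu offset=%d minutes\n", t, off);
	return 0;
}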
diff --git a/git-am.sh b/git-am.sh
index 1c13b13991..64d8e2a64d 100755
--- a/git-am.sh
+++ b/git-am.sh
@@ -15,6 +15,7 @@ q,quiet be quiet
s,signoff add a Signed-off-by line to the commit message
u,utf8 recode into utf8 (default)
k,keep pass -k flag to git-mailinfo
+keep-non-patch pass -b flag to git-mailinfo
keep-cr pass --keep-cr flag to git-mailsplit for mbox format
no-keep-cr do not pass --keep-cr flag to git-mailsplit independent of am.keepcr
c,scissors strip everything before a scissors line
@@ -387,6 +388,8 @@ do
utf8= ;;
-k|--keep)
keep=t ;;
+ --keep-non-patch)
+ keep=b ;;
-c|--scissors)
scissors=t ;;
--no-scissors)
@@ -565,16 +568,25 @@ case "$resolved" in
fi
esac
+# Now, decide what command line options we will give to the git
+# commands we invoke, based on the result of parsing command line
+# options and previous invocation state stored in $dotest/ files.
+
if test "$(cat "$dotest/utf8")" = t
then
utf8=-u
else
utf8=-n
fi
-if test "$(cat "$dotest/keep")" = t
-then
- keep=-k
-fi
+keep=$(cat "$dotest/keep")
+case "$keep" in
+t)
+ keep=-k ;;
+b)
+ keep=-b ;;
+*)
+ keep= ;;
+esac
case "$(cat "$dotest/scissors")" in
t)
scissors=--scissors ;;
diff --git a/git-compat-util.h b/git-compat-util.h
index 8f3972cd32..426ae43be9 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -463,6 +463,8 @@ static inline int has_extension(const char *filename, const char *ext)
#undef isdigit
#undef isalpha
#undef isalnum
+#undef islower
+#undef isupper
#undef tolower
#undef toupper
extern unsigned char sane_ctype[256];
@@ -478,6 +480,8 @@ extern unsigned char sane_ctype[256];
#define isdigit(x) sane_istest(x,GIT_DIGIT)
#define isalpha(x) sane_istest(x,GIT_ALPHA)
#define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT)
+#define islower(x) sane_iscase(x, 1)
+#define isupper(x) sane_iscase(x, 0)
#define is_glob_special(x) sane_istest(x,GIT_GLOB_SPECIAL)
#define is_regex_special(x) sane_istest(x,GIT_GLOB_SPECIAL | GIT_REGEX_SPECIAL)
#define tolower(x) sane_case((unsigned char)(x), 0x20)
@@ -491,6 +495,17 @@ static inline int sane_case(int x, int high)
return x;
}
+static inline int sane_iscase(int x, int is_lower)
+{
+ if (!sane_istest(x, GIT_ALPHA))
+ return 0;
+
+ if (is_lower)
+ return (x & 0x20) != 0;
+ else
+ return (x & 0x20) == 0;
+}
+
static inline int strtoul_ui(char const *s, int base, unsigned int *result)
{
unsigned long ul;
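
islower() and isupper() join the sane ctype layer: once the table lookup confirms an ASCII letter, the two cases differ only in bit 0x20, which is all sane_iscase() has to test. A self-contained illustration; the GIT_ALPHA table lookup is replaced by a plain range check here:

#include <stdio.h>

/* Stand-in for the GIT_ALPHA table lookup. */
static int is_alpha(int x)
{
	return ('a' <= x && x <= 'z') || ('A' <= x && x <= 'Z');
}

/* Mirrors sane_iscase(): letters only, then test the case bit. */
static int sane_iscase(int x, int is_lower)
{
	if (!is_alpha(x))
		return 0;
	if (is_lower)
		return (x & 0x20) != 0;
	else
		return (x & 0x20) == 0;
}

int main(void)
{
	printf("islower('g')=%d isupper('g')=%d\n",
	       sane_iscase('g', 1), sane_iscase('g', 0));
	printf("islower('1')=%d isupper('1')=%d\n",
	       sane_iscase('1', 1), sane_iscase('1', 0));
	return 0;
}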
diff --git a/git-mergetool.sh b/git-mergetool.sh
index 085e213a12..a9f23f7fcd 100755
--- a/git-mergetool.sh
+++ b/git-mergetool.sh
@@ -181,10 +181,14 @@ stage_submodule () {
}
checkout_staged_file () {
- tmpfile=$(expr "$(git checkout-index --temp --stage="$1" "$2")" : '\([^ ]*\) ')
+ tmpfile=$(expr \
+ "$(git checkout-index --temp --stage="$1" "$2" 2>/dev/null)" \
+ : '\([^ ]*\) ')
if test $? -eq 0 -a -n "$tmpfile" ; then
mv -- "$(git rev-parse --show-cdup)$tmpfile" "$3"
+ else
+ >"$3"
fi
}
@@ -224,9 +228,9 @@ merge_file () {
mv -- "$MERGED" "$BACKUP"
cp -- "$BACKUP" "$MERGED"
- base_present && checkout_staged_file 1 "$MERGED" "$BASE"
- local_present && checkout_staged_file 2 "$MERGED" "$LOCAL"
- remote_present && checkout_staged_file 3 "$MERGED" "$REMOTE"
+ checkout_staged_file 1 "$MERGED" "$BASE"
+ checkout_staged_file 2 "$MERGED" "$LOCAL"
+ checkout_staged_file 3 "$MERGED" "$REMOTE"
if test -z "$local_mode" -o -z "$remote_mode"; then
echo "Deleted merge conflict for '$MERGED':"
diff --git a/git-request-pull.sh b/git-request-pull.sh
index 64960d65a1..e6438e24c7 100755
--- a/git-request-pull.sh
+++ b/git-request-pull.sh
@@ -63,7 +63,7 @@ die "fatal: No commits in common between $base and $head"
find_matching_ref='
sub abbr {
my $ref = shift;
- if ($ref =~ s|refs/heads/|| || $ref =~ s|refs/tags/||) {
+ if ($ref =~ s|^refs/heads/|| || $ref =~ s|^refs/tags/|tags/|) {
return $ref;
} else {
return $ref;
diff --git a/git-sh-i18n.sh b/git-sh-i18n.sh
index b4575fb3a1..d5fae993b0 100644
--- a/git-sh-i18n.sh
+++ b/git-sh-i18n.sh
@@ -16,61 +16,48 @@ else
fi
export TEXTDOMAINDIR
-if test -z "$GIT_GETTEXT_POISON"
+# First decide what scheme to use...
+GIT_INTERNAL_GETTEXT_SH_SCHEME=fallthrough
+if test -n "@@USE_GETTEXT_SCHEME@@"
+then
+ GIT_INTERNAL_GETTEXT_SH_SCHEME="@@USE_GETTEXT_SCHEME@@"
+elif test -n "@@USE_FALLTHROUGH_GETTEXT_SCHEME@@$GIT_INTERNAL_GETTEXT_TEST_FALLBACKS"
+then
+ : no probing necessary
+elif test -n "$GIT_GETTEXT_POISON"
then
- if test -z "$GIT_INTERNAL_GETTEXT_TEST_FALLBACKS" && type gettext.sh >/dev/null 2>&1
- then
- # This is GNU libintl's gettext.sh, we don't need to do anything
- # else than setting up the environment and loading gettext.sh
- GIT_INTERNAL_GETTEXT_SH_SCHEME=gnu
- export GIT_INTERNAL_GETTEXT_SH_SCHEME
-
- # Try to use libintl's gettext.sh, or fall back to English if we
- # can't.
- . gettext.sh
-
- elif test -z "$GIT_INTERNAL_GETTEXT_TEST_FALLBACKS" && test "$(gettext -h 2>&1)" = "-h"
- then
- # We don't have gettext.sh, but there's a gettext binary in our
- # path. This is probably Solaris or something like it which has a
- # gettext implementation that isn't GNU libintl.
- GIT_INTERNAL_GETTEXT_SH_SCHEME=solaris
- export GIT_INTERNAL_GETTEXT_SH_SCHEME
-
- # Solaris has a gettext(1) but no eval_gettext(1)
- eval_gettext () {
- gettext "$1" | (
- export PATH $(git sh-i18n--envsubst --variables "$1");
- git sh-i18n--envsubst "$1"
- )
- }
-
- else
- # Since gettext.sh isn't available we'll have to define our own
- # dummy pass-through functions.
-
- # Tell our tests that we don't have the real gettext.sh
- GIT_INTERNAL_GETTEXT_SH_SCHEME=fallthrough
- export GIT_INTERNAL_GETTEXT_SH_SCHEME
-
- gettext () {
- printf "%s" "$1"
- }
-
- eval_gettext () {
- printf "%s" "$1" | (
- export PATH $(git sh-i18n--envsubst --variables "$1");
- git sh-i18n--envsubst "$1"
- )
- }
- fi
-else
- # Emit garbage under GETTEXT_POISON=YesPlease. Unlike the C tests
- # this relies on an environment variable
-
GIT_INTERNAL_GETTEXT_SH_SCHEME=poison
- export GIT_INTERNAL_GETTEXT_SH_SCHEME
+elif type gettext.sh >/dev/null 2>&1
+then
+ # GNU libintl's gettext.sh
+ GIT_INTERNAL_GETTEXT_SH_SCHEME=gnu
+elif test "$(gettext -h 2>&1)" = "-h"
+then
+ # gettext binary exists but no gettext.sh. likely to be a gettext
+ # binary on a Solaris or something that is not GNU libintl and
+ # lack eval_gettext.
+ GIT_INTERNAL_GETTEXT_SH_SCHEME=gettext_without_eval_gettext
+fi
+export GIT_INTERNAL_GETTEXT_SH_SCHEME
+# ... and then follow that decision.
+case "$GIT_INTERNAL_GETTEXT_SH_SCHEME" in
+gnu)
+ # Use libintl's gettext.sh, or fall back to English if we can't.
+ . gettext.sh
+ ;;
+gettext_without_eval_gettext)
+ # Solaris has a gettext(1) but no eval_gettext(1)
+ eval_gettext () {
+ gettext "$1" | (
+ export PATH $(git sh-i18n--envsubst --variables "$1");
+ git sh-i18n--envsubst "$1"
+ )
+ }
+ ;;
+poison)
+ # Emit garbage so that tests that incorrectly rely on translatable
+ # strings will fail.
gettext () {
printf "%s" "# GETTEXT POISON #"
}
@@ -78,7 +65,20 @@ else
eval_gettext () {
printf "%s" "# GETTEXT POISON #"
}
-fi
+ ;;
+*)
+ gettext () {
+ printf "%s" "$1"
+ }
+
+ eval_gettext () {
+ printf "%s" "$1" | (
+ export PATH $(git sh-i18n--envsubst --variables "$1");
+ git sh-i18n--envsubst "$1"
+ )
+ }
+ ;;
+esac
# Git-specific wrapper functions
gettextln () {
diff --git a/git-sh-setup.sh b/git-sh-setup.sh
index 1fba6c2de0..5d8e4e6c89 100644
--- a/git-sh-setup.sh
+++ b/git-sh-setup.sh
@@ -200,7 +200,7 @@ get_author_ident_from_commit () {
s/.*/GIT_AUTHOR_EMAIL='\''&'\''/p
g
- s/^author [^<]* <[^>]*> \(.*\)$/\1/
+ s/^author [^<]* <[^>]*> \(.*\)$/@\1/
s/.*/GIT_AUTHOR_DATE='\''&'\''/p
q
diff --git a/git-submodule.sh b/git-submodule.sh
index 3adab93635..9bb2e13e92 100755
--- a/git-submodule.sh
+++ b/git-submodule.sh
@@ -131,6 +131,7 @@ module_clone()
gitdir=
gitdir_base=
name=$(module_name "$path" 2>/dev/null)
+ test -n "$name" || name="$path"
base_path=$(dirname "$path")
gitdir=$(git rev-parse --git-dir)
diff --git a/git.c b/git.c
index fb9029cbf1..3805616630 100644
--- a/git.c
+++ b/git.c
@@ -495,7 +495,7 @@ static void execv_dashed_external(const char **argv)
* if we fail because the command is not found, it is
* OK to return. Otherwise, we just pass along the status code.
*/
- status = run_command_v_opt(argv, RUN_SILENT_EXEC_FAILURE);
+ status = run_command_v_opt(argv, RUN_SILENT_EXEC_FAILURE | RUN_CLEAN_ON_EXIT);
if (status >= 0 || errno != ENOENT)
exit(status);
diff --git a/git.spec.in b/git.spec.in
index c562c62284..b93df109c8 100644
--- a/git.spec.in
+++ b/git.spec.in
@@ -134,6 +134,7 @@ find $RPM_BUILD_ROOT -type f -name perllocal.pod -exec rm -f {} ';'
%else
rm -rf $RPM_BUILD_ROOT%{_mandir}
%endif
+rm -rf $RPM_BUILD_ROOT%{_datadir}/locale
mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/bash_completion.d
install -m 644 -T contrib/completion/git-completion.bash $RPM_BUILD_ROOT%{_sysconfdir}/bash_completion.d/git
diff --git a/gitweb/gitweb.perl b/gitweb/gitweb.perl
index abb5a79afc..9ee58114d9 100755
--- a/gitweb/gitweb.perl
+++ b/gitweb/gitweb.perl
@@ -760,6 +760,7 @@ our @cgi_param_mapping = (
search_use_regexp => "sr",
ctag => "by_tag",
diff_style => "ds",
+ project_filter => "pf",
# this must be last entry (for manipulation from JavaScript)
javascript => "js"
);
@@ -976,7 +977,7 @@ sub evaluate_path_info {
our ($action, $project, $file_name, $file_parent, $hash, $hash_parent, $hash_base,
$hash_parent_base, @extra_options, $page, $searchtype, $search_use_regexp,
- $searchtext, $search_regexp);
+ $searchtext, $search_regexp, $project_filter);
sub evaluate_and_validate_params {
our $action = $input_params{'action'};
if (defined $action) {
@@ -994,6 +995,13 @@ sub evaluate_and_validate_params {
}
}
+ our $project_filter = $input_params{'project_filter'};
+ if (defined $project_filter) {
+ if (!validate_pathname($project_filter)) {
+ die_error(404, "Invalid project_filter parameter");
+ }
+ }
+
our $file_name = $input_params{'file_name'};
if (defined $file_name) {
if (!validate_pathname($file_name)) {
@@ -1123,8 +1131,10 @@ sub dispatch {
if (!defined $action) {
if (defined $hash) {
$action = git_get_type($hash);
+ $action or die_error(404, "Object does not exist");
} elsif (defined $hash_base && defined $file_name) {
$action = git_get_type("$hash_base:$file_name");
+ $action or die_error(404, "File or directory does not exist");
} elsif (defined $project) {
$action = 'summary';
} else {
@@ -2391,7 +2401,7 @@ sub get_feed_info {
return unless (defined $project);
# some views should link to OPML, or to generic project feed,
# or don't have specific feed yet (so they should use generic)
- return if ($action =~ /^(?:tags|heads|forks|tag|search)$/x);
+ return if (!$action || $action =~ /^(?:tags|heads|forks|tag|search)$/x);
my $branch;
# branches refs uses 'refs/heads/' prefix (fullname) to differentiate
@@ -2827,10 +2837,9 @@ sub git_get_project_url_list {
sub git_get_projects_list {
my $filter = shift || '';
+ my $paranoid = shift;
my @list;
- $filter =~ s/\.git$//;
-
if (-d $projects_list) {
# search in directory
my $dir = $projects_list;
@@ -2839,7 +2848,7 @@ sub git_get_projects_list {
my $pfxlen = length("$dir");
my $pfxdepth = ($dir =~ tr!/!!);
# when filtering, search only given subdirectory
- if ($filter) {
+ if ($filter && !$paranoid) {
$dir .= "/$filter";
$dir =~ s!/+$!!;
}
@@ -2864,6 +2873,10 @@ sub git_get_projects_list {
}
my $path = substr($File::Find::name, $pfxlen + 1);
+		# in paranoid mode, apply the filter only here (the search
+		# directory above was not narrowed)
+ if ($paranoid && $filter && $path !~ m!^\Q$filter\E/!) {
+ next;
+ }
# we check related file in $projectroot
if (check_export_ok("$projectroot/$path")) {
push @list, { path => $path };
@@ -3729,7 +3742,12 @@ sub run_highlighter {
sub get_page_title {
my $title = to_utf8($site_name);
- return $title unless (defined $project);
+ unless (defined $project) {
+ if (defined $project_filter) {
+ $title .= " - " . to_utf8($project_filter);
+ }
+ return $title;
+ }
$title .= " - " . to_utf8($project);
return $title unless (defined $action);
@@ -3823,12 +3841,27 @@ sub print_header_links {
}
}
+sub print_nav_breadcrumbs_path {
+ my $dirprefix = undef;
+ while (my $part = shift) {
+ $dirprefix .= "/" if defined $dirprefix;
+ $dirprefix .= $part;
+ print $cgi->a({-href => href(project => undef,
+ project_filter => $dirprefix,
+ action => "project_list")},
+ esc_html($part)) . " / ";
+ }
+}
+
sub print_nav_breadcrumbs {
my %opts = @_;
print $cgi->a({-href => esc_url($home_link)}, $home_link_str) . " / ";
if (defined $project) {
- print $cgi->a({-href => href(action=>"summary")}, esc_html($project));
+ my @dirname = split '/', $project;
+ my $projectbasename = pop @dirname;
+ print_nav_breadcrumbs_path(@dirname);
+ print $cgi->a({-href => href(action=>"summary")}, esc_html($projectbasename));
if (defined $action) {
my $action_print = $action ;
if (defined $opts{-action_extra}) {
@@ -3841,6 +3874,8 @@ sub print_nav_breadcrumbs {
print " / $opts{-action_extra}";
}
print "\n";
+ } elsif (defined $project_filter) {
+ print_nav_breadcrumbs_path(split '/', $project_filter);
}
}
@@ -3963,9 +3998,11 @@ sub git_footer_html {
}
} else {
- print $cgi->a({-href => href(project=>undef, action=>"opml"),
+ print $cgi->a({-href => href(project=>undef, action=>"opml",
+ project_filter => $project_filter),
-class => $feed_class}, "OPML") . " ";
- print $cgi->a({-href => href(project=>undef, action=>"project_index"),
+ print $cgi->a({-href => href(project=>undef, action=>"project_index",
+ project_filter => $project_filter),
-class => $feed_class}, "TXT") . "\n";
}
print "</div>\n"; # class="page_footer"
@@ -5123,6 +5160,34 @@ sub git_patchset_body {
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+sub git_project_search_form {
+ my ($searchtext, $search_use_regexp);
+
+ my $limit = '';
+ if ($project_filter) {
+ $limit = " in '$project_filter/'";
+ }
+
+ print "<div class=\"projsearch\">\n";
+ print $cgi->startform(-method => 'get', -action => $my_uri) .
+ $cgi->hidden(-name => 'a', -value => 'project_list') . "\n";
+ print $cgi->hidden(-name => 'pf', -value => $project_filter). "\n"
+ if (defined $project_filter);
+ print $cgi->textfield(-name => 's', -value => $searchtext,
+ -title => "Search project by name and description$limit",
+ -size => 60) . "\n" .
+ "<span title=\"Extended regular expression\">" .
+ $cgi->checkbox(-name => 'sr', -value => 1, -label => 're',
+ -checked => $search_use_regexp) .
+ "</span>\n" .
+ $cgi->submit(-name => 'btnS', -value => 'Search') .
+ $cgi->end_form() . "\n" .
+ $cgi->a({-href => href(project => undef, searchtext => undef,
+ project_filter => $project_filter)},
+ esc_html("List all projects$limit")) . "<br />\n";
+ print "</div>\n";
+}
+
# fills project list info (age, description, owner, category, forks)
# for each project in the list, removing invalid projects from
# returned list
@@ -5979,7 +6044,7 @@ sub git_project_list {
die_error(400, "Unknown order parameter");
}
- my @list = git_get_projects_list();
+ my @list = git_get_projects_list($project_filter, $strict_export);
if (!@list) {
die_error(404, "No projects found");
}
@@ -5990,11 +6055,8 @@ sub git_project_list {
insert_file($home_text);
print "</div>\n";
}
- print $cgi->startform(-method => "get") .
- "<p class=\"projsearch\">Search:\n" .
- $cgi->textfield(-name => "s", -value => $searchtext) . "\n" .
- "</p>" .
- $cgi->end_form() . "\n";
+
+ git_project_search_form($searchtext, $search_use_regexp);
git_project_list_body(\@list, $order);
git_footer_html();
}
@@ -6005,7 +6067,9 @@ sub git_forks {
die_error(400, "Unknown order parameter");
}
- my @list = git_get_projects_list($project);
+ my $filter = $project;
+ $filter =~ s/\.git$//;
+ my @list = git_get_projects_list($filter);
if (!@list) {
die_error(404, "No forks found");
}
@@ -6018,7 +6082,7 @@ sub git_forks {
}
sub git_project_index {
- my @projects = git_get_projects_list();
+ my @projects = git_get_projects_list($project_filter, $strict_export);
if (!@projects) {
die_error(404, "No projects found");
}
@@ -6064,7 +6128,9 @@ sub git_summary {
if ($check_forks) {
# find forks of a project
- @forklist = git_get_projects_list($project);
+ my $filter = $project;
+ $filter =~ s/\.git$//;
+ @forklist = git_get_projects_list($filter);
# filter out forks of forks
@forklist = filter_forks_from_projects_list(\@forklist)
if (@forklist);
@@ -7855,7 +7921,7 @@ sub git_atom {
}
sub git_opml {
- my @list = git_get_projects_list();
+ my @list = git_get_projects_list($project_filter, $strict_export);
if (!@list) {
die_error(404, "No projects found");
}
@@ -7866,11 +7932,17 @@ sub git_opml {
-content_disposition => 'inline; filename="opml.xml"');
my $title = esc_html($site_name);
+ my $filter = " within subdirectory ";
+ if (defined $project_filter) {
+ $filter .= esc_html($project_filter);
+ } else {
+ $filter = "";
+ }
print <<XML;
<?xml version="1.0" encoding="utf-8"?>
<opml version="1.0">
<head>
- <title>$title OPML Export</title>
+ <title>$title OPML Export$filter</title>
</head>
<body>
<outline text="git RSS feeds">
diff --git a/gitweb/static/gitweb.css b/gitweb/static/gitweb.css
index c7827e8f1d..c530355a7c 100644
--- a/gitweb/static/gitweb.css
+++ b/gitweb/static/gitweb.css
@@ -520,8 +520,13 @@ div.search {
right: 12px
}
-p.projsearch {
+div.projsearch {
text-align: center;
+ margin: 20px 0px;
+}
+
+div.projsearch form {
+ margin-bottom: 2px;
}
td.linenr {
diff --git a/imap-send.c b/imap-send.c
index e40125a22b..972ad62cd9 100644
--- a/imap-send.c
+++ b/imap-send.c
@@ -42,28 +42,6 @@ struct store_conf {
unsigned trash_remote_new:1, trash_only_new:1;
};
-struct string_list {
- struct string_list *next;
- char string[1];
-};
-
-struct channel_conf {
- struct channel_conf *next;
- char *name;
- struct store_conf *master, *slave;
- char *master_name, *slave_name;
- char *sync_state;
- struct string_list *patterns;
- int mops, sops;
- unsigned max_messages; /* for slave only */
-};
-
-struct group_conf {
- struct group_conf *next;
- char *name;
- struct string_list *channels;
-};
-
/* For message->status */
#define M_RECENT (1<<0) /* unsyncable flag; maildir_* depend on this being 1<<0 */
#define M_DEAD (1<<1) /* expunged */
@@ -71,7 +49,6 @@ struct group_conf {
struct message {
struct message *next;
- /* struct string_list *keywords; */
size_t size; /* zero implies "not fetched" */
int uid;
unsigned char flags, status;
diff --git a/log-tree.c b/log-tree.c
index c719a6e385..cea8756866 100644
--- a/log-tree.c
+++ b/log-tree.c
@@ -120,9 +120,9 @@ static int add_ref_decoration(const char *refname, const unsigned char *sha1, in
type = DECORATION_REF_REMOTE;
else if (!prefixcmp(refname, "refs/tags/"))
type = DECORATION_REF_TAG;
- else if (!prefixcmp(refname, "refs/stash"))
+ else if (!strcmp(refname, "refs/stash"))
type = DECORATION_REF_STASH;
- else if (!prefixcmp(refname, "HEAD"))
+ else if (!strcmp(refname, "HEAD"))
type = DECORATION_REF_HEAD;
if (!cb_data || *(int *)cb_data == DECORATE_SHORT_REFS)
diff --git a/object.c b/object.c
index d8d09f92aa..6b06297a5f 100644
--- a/object.c
+++ b/object.c
@@ -191,10 +191,15 @@ struct object *parse_object(const unsigned char *sha1)
enum object_type type;
int eaten;
const unsigned char *repl = lookup_replace_object(sha1);
- void *buffer = read_sha1_file(sha1, &type, &size);
+ void *buffer;
+ struct object *obj;
+
+ obj = lookup_object(sha1);
+ if (obj && obj->parsed)
+ return obj;
+ buffer = read_sha1_file(sha1, &type, &size);
if (buffer) {
- struct object *obj;
if (check_sha1_signature(repl, buffer, size, typename(type)) < 0) {
free(buffer);
error("sha1 mismatch %s\n", sha1_to_hex(repl));
diff --git a/pack-refs.c b/pack-refs.c
index 23bbd00e3e..f09a054228 100644
--- a/pack-refs.c
+++ b/pack-refs.c
@@ -143,7 +143,6 @@ int pack_refs(unsigned int flags)
packed.fd = -1;
if (commit_lock_file(&packed) < 0)
die_errno("unable to overwrite old ref-pack file");
- if (cbdata.flags & PACK_REFS_PRUNE)
- prune_refs(cbdata.ref_to_prune);
+ prune_refs(cbdata.ref_to_prune);
return 0;
}
diff --git a/refs.c b/refs.c
index 6f436f1cb0..b8843bb476 100644
--- a/refs.c
+++ b/refs.c
@@ -17,6 +17,15 @@ struct ref_entry {
struct ref_array {
int nr, alloc;
+
+ /*
+ * Entries with index 0 <= i < sorted are sorted by name. New
+ * entries are appended to the list unsorted, and are sorted
+ * only when required; thus we avoid the need to sort the list
+ * after the addition of every reference.
+ */
+ int sorted;
+
struct ref_entry **refs;
};
@@ -105,12 +114,18 @@ static int is_dup_ref(const struct ref_entry *ref1, const struct ref_entry *ref2
}
}
+/*
+ * Sort the entries in array (if they are not already sorted).
+ */
static void sort_ref_array(struct ref_array *array)
{
int i, j;
- /* Nothing to sort unless there are at least two entries */
- if (array->nr < 2)
+ /*
+ * This check also prevents passing a zero-length array to qsort(),
+ * which is a problem on some platforms.
+ */
+ if (array->sorted == array->nr)
return;
qsort(array->refs, array->nr, sizeof(*array->refs), ref_entry_cmp);
@@ -124,7 +139,7 @@ static void sort_ref_array(struct ref_array *array)
}
array->refs[++i] = array->refs[j];
}
- array->nr = i + 1;
+ array->sorted = array->nr = i + 1;
}
static struct ref_entry *search_ref_array(struct ref_array *array, const char *refname)
@@ -137,7 +152,7 @@ static struct ref_entry *search_ref_array(struct ref_array *array, const char *r
if (!array->nr)
return NULL;
-
+ sort_ref_array(array);
len = strlen(refname) + 1;
e = xmalloc(sizeof(struct ref_entry) + len);
memcpy(e->name, refname, len);
@@ -168,6 +183,10 @@ static struct ref_cache {
static struct ref_entry *current_ref;
+/*
+ * Never call sort_ref_array() on the extra_refs, because it is
+ * allowed to contain entries with duplicate names.
+ */
static struct ref_array extra_refs;
static void clear_ref_array(struct ref_array *array)
@@ -176,7 +195,7 @@ static void clear_ref_array(struct ref_array *array)
for (i = 0; i < array->nr; i++)
free(array->refs[i]);
free(array->refs);
- array->nr = array->alloc = 0;
+ array->sorted = array->nr = array->alloc = 0;
array->refs = NULL;
}
@@ -268,7 +287,6 @@ static void read_packed_refs(FILE *f, struct ref_array *array)
!get_sha1_hex(refline + 1, sha1))
hashcpy(last->peeled, sha1);
}
- sort_ref_array(array);
}
void add_extra_ref(const char *refname, const unsigned char *sha1, int flag)
@@ -301,6 +319,12 @@ static struct ref_array *get_packed_refs(struct ref_cache *refs)
return &refs->packed;
}
+void add_packed_ref(const char *refname, const unsigned char *sha1)
+{
+ add_ref(get_packed_refs(get_ref_cache(NULL)),
+ create_ref_entry(refname, sha1, REF_ISPACKED, 1));
+}
+
static void get_ref_dir(struct ref_cache *refs, const char *base,
struct ref_array *array)
{
@@ -404,7 +428,6 @@ static struct ref_array *get_loose_refs(struct ref_cache *refs)
{
if (!refs->did_loose) {
get_ref_dir(refs, "refs", &refs->loose);
- sort_ref_array(&refs->loose);
refs->did_loose = 1;
}
return &refs->loose;
@@ -720,6 +743,8 @@ static int do_for_each_ref(const char *submodule, const char *base, each_ref_fn
for (i = 0; i < extra->nr; i++)
retval = do_one_ref(base, fn, trim, flags, cb_data, extra->refs[i]);
+ sort_ref_array(packed);
+ sort_ref_array(loose);
while (p < packed->nr && l < loose->nr) {
struct ref_entry *entry;
int cmp = strcmp(packed->refs[p]->name, loose->refs[l]->name);
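The new "sorted" counter makes sorting lazy: read_packed_refs() and get_ref_dir() now append entries unsorted, and sort_ref_array() runs only when a lookup (search_ref_array) or an ordered walk (do_for_each_ref) actually needs sorted data, instead of after every load. A minimal, self-contained sketch of the same pattern, using illustrative names that are not part of git's API:

    #include <stdlib.h>
    #include <string.h>

    struct lazy_array {
        int nr, sorted;       /* entries 0..sorted-1 are known to be in order */
        const char **items;
    };

    static int cmp_str(const void *a, const void *b)
    {
        return strcmp(*(const char **)a, *(const char **)b);
    }

    static void lazy_sort(struct lazy_array *arr)
    {
        if (arr->sorted == arr->nr)
            return;           /* nothing appended since the last sort;
                                 also keeps a zero-length array away from qsort() */
        qsort(arr->items, arr->nr, sizeof(*arr->items), cmp_str);
        arr->sorted = arr->nr;
    }

    static const char **lazy_find(struct lazy_array *arr, const char *key)
    {
        lazy_sort(arr);       /* sort on demand, as search_ref_array() does */
        return bsearch(&key, arr->items, arr->nr, sizeof(*arr->items), cmp_str);
    }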
diff --git a/refs.h b/refs.h
index d4982915c5..00ba1e2813 100644
--- a/refs.h
+++ b/refs.h
@@ -51,6 +51,12 @@ extern int for_each_rawref(each_ref_fn, void *);
extern void warn_dangling_symref(FILE *fp, const char *msg_fmt, const char *refname);
/*
+ * Add a reference to the in-memory packed reference cache. To actually
+ * write the reference to the packed-refs file, call pack_refs().
+ */
+extern void add_packed_ref(const char *refname, const unsigned char *sha1);
+
+/*
* Extra refs will be listed by for_each_ref() before any actual refs
* for the duration of this process or until clear_extra_refs() is
* called. Only extra refs added before for_each_ref() is called will
diff --git a/remote-curl.c b/remote-curl.c
index 48c20b86f3..d159fe7f34 100644
--- a/remote-curl.c
+++ b/remote-curl.c
@@ -770,7 +770,9 @@ static int push_git(struct discovery *heads, int nr_spec, char **specs)
argv[argc++] = "--thin";
if (options.dry_run)
argv[argc++] = "--dry-run";
- if (options.verbosity > 1)
+ if (options.verbosity == 0)
+ argv[argc++] = "--quiet";
+ else if (options.verbosity > 1)
argv[argc++] = "--verbose";
argv[argc++] = url;
for (i = 0; i < nr_spec; i++)
@@ -805,7 +807,7 @@ static int push(int nr_spec, char **specs)
static void parse_push(struct strbuf *buf)
{
char **specs = NULL;
- int alloc_spec = 0, nr_spec = 0, i;
+ int alloc_spec = 0, nr_spec = 0, i, ret;
do {
if (!prefixcmp(buf->buf, "push ")) {
@@ -822,12 +824,13 @@ static void parse_push(struct strbuf *buf)
break;
} while (1);
- if (push(nr_spec, specs))
- exit(128); /* error already reported */
-
+ ret = push(nr_spec, specs);
printf("\n");
fflush(stdout);
+ if (ret)
+ exit(128); /* error already reported */
+
free_specs:
for (i = 0; i < nr_spec; i++)
free(specs[i]);
diff --git a/remote.c b/remote.c
index 73a3809300..af597b3a62 100644
--- a/remote.c
+++ b/remote.c
@@ -1572,19 +1572,29 @@ int format_tracking_info(struct branch *branch, struct strbuf *sb)
base = branch->merge[0]->dst;
base = shorten_unambiguous_ref(base, 0);
if (!num_theirs)
- strbuf_addf(sb, "Your branch is ahead of '%s' "
- "by %d commit%s.\n",
- base, num_ours, (num_ours == 1) ? "" : "s");
+ strbuf_addf(sb,
+ Q_("Your branch is ahead of '%s' by %d commit.\n",
+ "Your branch is ahead of '%s' by %d commits.\n",
+ num_ours),
+ base, num_ours);
else if (!num_ours)
- strbuf_addf(sb, "Your branch is behind '%s' "
- "by %d commit%s, "
- "and can be fast-forwarded.\n",
- base, num_theirs, (num_theirs == 1) ? "" : "s");
+ strbuf_addf(sb,
+ Q_("Your branch is behind '%s' by %d commit, "
+ "and can be fast-forwarded.\n",
+ "Your branch is behind '%s' by %d commits, "
+ "and can be fast-forwarded.\n",
+ num_theirs),
+ base, num_theirs);
else
- strbuf_addf(sb, "Your branch and '%s' have diverged,\n"
- "and have %d and %d different commit(s) each, "
- "respectively.\n",
- base, num_ours, num_theirs);
+ strbuf_addf(sb,
+ Q_("Your branch and '%s' have diverged,\n"
+ "and have %d and %d different commit each, "
+ "respectively.\n",
+ "Your branch and '%s' have diverged,\n"
+ "and have %d and %d different commits each, "
+ "respectively.\n",
+ num_theirs),
+ base, num_ours, num_theirs);
return 1;
}
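The hand-rolled "%d commit%s" messages are replaced with Q_(), git's wrapper that selects the singular or plural translation. A hedged sketch of the underlying libintl call that such a wrapper reduces to, assuming GNU gettext is available:

    #include <libintl.h>
    #include <stdio.h>

    static void report_ahead(const char *base, int num_ours)
    {
        /* ngettext() picks the message form appropriate for num_ours
         * under the active locale's plural rules */
        printf(ngettext("Your branch is ahead of '%s' by %d commit.\n",
                        "Your branch is ahead of '%s' by %d commits.\n",
                        num_ours),
               base, num_ours);
    }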
diff --git a/revision.c b/revision.c
index 064e351084..c97d834484 100644
--- a/revision.c
+++ b/revision.c
@@ -139,11 +139,32 @@ void mark_tree_uninteresting(struct tree *tree)
void mark_parents_uninteresting(struct commit *commit)
{
- struct commit_list *parents = commit->parents;
+ struct commit_list *parents = NULL, *l;
+
+ for (l = commit->parents; l; l = l->next)
+ commit_list_insert(l->item, &parents);
while (parents) {
struct commit *commit = parents->item;
- if (!(commit->object.flags & UNINTERESTING)) {
+ l = parents;
+ parents = parents->next;
+ free(l);
+
+ while (commit) {
+ /*
+ * A missing commit is ok iff its parent is marked
+ * uninteresting.
+ *
+ * We just mark such a thing parsed, so that when
+ * it is popped next time around, we won't be trying
+ * to parse it and get an error.
+ */
+ if (!has_sha1_file(commit->object.sha1))
+ commit->object.parsed = 1;
+
+ if (commit->object.flags & UNINTERESTING)
+ break;
+
commit->object.flags |= UNINTERESTING;
/*
@@ -154,21 +175,13 @@ void mark_parents_uninteresting(struct commit *commit)
* wasn't uninteresting), in which case we need
* to mark its parents recursively too..
*/
- if (commit->parents)
- mark_parents_uninteresting(commit);
- }
+ if (!commit->parents)
+ break;
- /*
- * A missing commit is ok iff its parent is marked
- * uninteresting.
- *
- * We just mark such a thing parsed, so that when
- * it is popped next time around, we won't be trying
- * to parse it and get an error.
- */
- if (!has_sha1_file(commit->object.sha1))
- commit->object.parsed = 1;
- parents = parents->next;
+ for (l = commit->parents->next; l; l = l->next)
+ commit_list_insert(l->item, &parents);
+ commit = commit->parents->item;
+ }
}
}
@@ -416,7 +429,7 @@ static int rev_same_tree_as_empty(struct rev_info *revs, struct commit *commit)
static void try_to_simplify_commit(struct rev_info *revs, struct commit *commit)
{
struct commit_list **pp, *parent;
- int tree_changed = 0, tree_same = 0;
+ int tree_changed = 0, tree_same = 0, nth_parent = 0;
/*
* If we don't do pruning, everything is interesting
@@ -444,6 +457,14 @@ static void try_to_simplify_commit(struct rev_info *revs, struct commit *commit)
while ((parent = *pp) != NULL) {
struct commit *p = parent->item;
+ /*
+ * Do not compare with later parents when we care only about
+ * the first parent chain, in order to avoid derailing the
+ * traversal to follow a side branch that brought everything
+ * in the path we are limited to by the pathspec.
+ */
+ if (revs->first_parent_only && nth_parent++)
+ break;
if (parse_commit(p) < 0)
die("cannot simplify commit %s (because of %s)",
sha1_to_hex(commit->object.sha1),
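mark_parents_uninteresting() used to recurse once per parent, so the call stack could grow with history depth; the rewrite follows the first-parent chain in place and pushes only the remaining parents onto an explicit worklist. A simplified sketch of that shape with hypothetical node types (it marks everything reachable from the starting node, whereas the real function starts from the parents):

    #include <stdlib.h>

    #define UNINTERESTING 1u

    struct node;
    struct node_list { struct node *item; struct node_list *next; };
    struct node { unsigned flags; struct node_list *parents; };

    static void push(struct node_list **stack, struct node *n)
    {
        struct node_list *e = malloc(sizeof(*e));
        e->item = n;
        e->next = *stack;
        *stack = e;
    }

    static void mark_all_uninteresting(struct node *start)
    {
        struct node_list *stack = NULL;
        push(&stack, start);
        while (stack) {
            struct node_list *top = stack;
            struct node *n = top->item;
            struct node_list *p;
            stack = top->next;
            free(top);
            /* walk the first-parent chain in place; stack the others */
            while (n && !(n->flags & UNINTERESTING)) {
                n->flags |= UNINTERESTING;
                if (!n->parents)
                    break;
                for (p = n->parents->next; p; p = p->next)
                    push(&stack, p->item);
                n = n->parents->item;
            }
        }
    }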
diff --git a/run-command.c b/run-command.c
index 1c51043884..1db8abf984 100644
--- a/run-command.c
+++ b/run-command.c
@@ -1,8 +1,66 @@
#include "cache.h"
#include "run-command.h"
#include "exec_cmd.h"
+#include "sigchain.h"
#include "argv-array.h"
+struct child_to_clean {
+ pid_t pid;
+ struct child_to_clean *next;
+};
+static struct child_to_clean *children_to_clean;
+static int installed_child_cleanup_handler;
+
+static void cleanup_children(int sig)
+{
+ while (children_to_clean) {
+ struct child_to_clean *p = children_to_clean;
+ children_to_clean = p->next;
+ kill(p->pid, sig);
+ free(p);
+ }
+}
+
+static void cleanup_children_on_signal(int sig)
+{
+ cleanup_children(sig);
+ sigchain_pop(sig);
+ raise(sig);
+}
+
+static void cleanup_children_on_exit(void)
+{
+ cleanup_children(SIGTERM);
+}
+
+static void mark_child_for_cleanup(pid_t pid)
+{
+ struct child_to_clean *p = xmalloc(sizeof(*p));
+ p->pid = pid;
+ p->next = children_to_clean;
+ children_to_clean = p;
+
+ if (!installed_child_cleanup_handler) {
+ atexit(cleanup_children_on_exit);
+ sigchain_push_common(cleanup_children_on_signal);
+ installed_child_cleanup_handler = 1;
+ }
+}
+
+static void clear_child_for_cleanup(pid_t pid)
+{
+ struct child_to_clean **last, *p;
+
+ last = &children_to_clean;
+ for (p = children_to_clean; p; p = p->next) {
+ if (p->pid == pid) {
+ *last = p->next;
+ free(p);
+ return;
+ }
+ }
+}
+
static inline void close_pair(int fd[2])
{
close(fd[0]);
@@ -130,6 +188,9 @@ static int wait_or_whine(pid_t pid, const char *argv0, int silent_exec_failure)
} else {
error("waitpid is confused (%s)", argv0);
}
+
+ clear_child_for_cleanup(pid);
+
errno = failed_errno;
return code;
}
@@ -292,6 +353,8 @@ fail_pipe:
if (cmd->pid < 0)
error("cannot fork() for %s: %s", cmd->argv[0],
strerror(failed_errno = errno));
+ else if (cmd->clean_on_exit)
+ mark_child_for_cleanup(cmd->pid);
/*
* Wait for child's execvp. If the execvp succeeds (or if fork()
@@ -312,6 +375,7 @@ fail_pipe:
cmd->pid = -1;
}
close(notify_pipe[0]);
+
}
#else
{
@@ -356,6 +420,8 @@ fail_pipe:
failed_errno = errno;
if (cmd->pid < 0 && (!cmd->silent_exec_failure || errno != ENOENT))
error("cannot spawn %s: %s", cmd->argv[0], strerror(errno));
+ if (cmd->clean_on_exit && cmd->pid >= 0)
+ mark_child_for_cleanup(cmd->pid);
if (cmd->env)
free_environ(env);
@@ -431,6 +497,7 @@ static void prepare_run_command_v_opt(struct child_process *cmd,
cmd->stdout_to_stderr = opt & RUN_COMMAND_STDOUT_TO_STDERR ? 1 : 0;
cmd->silent_exec_failure = opt & RUN_SILENT_EXEC_FAILURE ? 1 : 0;
cmd->use_shell = opt & RUN_USING_SHELL ? 1 : 0;
+ cmd->clean_on_exit = opt & RUN_CLEAN_ON_EXIT ? 1 : 0;
}
int run_command_v_opt(const char **argv, int opt)
@@ -540,6 +607,8 @@ int start_async(struct async *async)
exit(!!async->proc(proc_in, proc_out, async->data));
}
+ mark_child_for_cleanup(async->pid);
+
if (need_in)
close(fdin[0]);
else if (async->in)
diff --git a/run-command.h b/run-command.h
index 56491b9f23..44f7d2bd42 100644
--- a/run-command.h
+++ b/run-command.h
@@ -38,6 +38,7 @@ struct child_process {
unsigned silent_exec_failure:1;
unsigned stdout_to_stderr:1;
unsigned use_shell:1;
+ unsigned clean_on_exit:1;
void (*preexec_cb)(void);
};
@@ -52,6 +53,7 @@ extern int run_hook(const char *index_file, const char *name, ...);
#define RUN_COMMAND_STDOUT_TO_STDERR 4
#define RUN_SILENT_EXEC_FAILURE 8
#define RUN_USING_SHELL 16
+#define RUN_CLEAN_ON_EXIT 32
int run_command_v_opt(const char **argv, int opt);
/*
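RUN_CLEAN_ON_EXIT (and the matching clean_on_exit bit) records each spawned child on a list that the atexit() handler and the sigchain handler walk to send SIGTERM, so helpers no longer outlive a git process that dies early; the git.c hunk above uses it for dashed externals. A small sketch of how a caller might request the behaviour, assuming only the API added here:

    #include "cache.h"
    #include "run-command.h"

    static int spawn_helper(const char *prog)
    {
        const char *argv[] = { prog, NULL };

        /* the child is terminated automatically if we exit or are
         * killed by a signal before it finishes */
        return run_command_v_opt(argv, RUN_SILENT_EXEC_FAILURE | RUN_CLEAN_ON_EXIT);
    }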
diff --git a/sequencer.c b/sequencer.c
index d1f28a6945..5fcbcb8875 100644
--- a/sequencer.c
+++ b/sequencer.c
@@ -1,7 +1,20 @@
#include "cache.h"
#include "sequencer.h"
-#include "strbuf.h"
#include "dir.h"
+#include "object.h"
+#include "commit.h"
+#include "tag.h"
+#include "run-command.h"
+#include "exec_cmd.h"
+#include "utf8.h"
+#include "cache-tree.h"
+#include "diff.h"
+#include "revision.h"
+#include "rerere.h"
+#include "merge-recursive.h"
+#include "refs.h"
+
+#define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
void remove_sequencer_state(void)
{
@@ -11,3 +24,905 @@ void remove_sequencer_state(void)
remove_dir_recursively(&seq_dir, 0);
strbuf_release(&seq_dir);
}
+
+static const char *action_name(const struct replay_opts *opts)
+{
+ return opts->action == REPLAY_REVERT ? "revert" : "cherry-pick";
+}
+
+static char *get_encoding(const char *message);
+
+struct commit_message {
+ char *parent_label;
+ const char *label;
+ const char *subject;
+ char *reencoded_message;
+ const char *message;
+};
+
+static int get_message(struct commit *commit, struct commit_message *out)
+{
+ const char *encoding;
+ const char *abbrev, *subject;
+ int abbrev_len, subject_len;
+ char *q;
+
+ if (!commit->buffer)
+ return -1;
+ encoding = get_encoding(commit->buffer);
+ if (!encoding)
+ encoding = "UTF-8";
+ if (!git_commit_encoding)
+ git_commit_encoding = "UTF-8";
+
+ out->reencoded_message = NULL;
+ out->message = commit->buffer;
+ if (strcmp(encoding, git_commit_encoding))
+ out->reencoded_message = reencode_string(commit->buffer,
+ git_commit_encoding, encoding);
+ if (out->reencoded_message)
+ out->message = out->reencoded_message;
+
+ abbrev = find_unique_abbrev(commit->object.sha1, DEFAULT_ABBREV);
+ abbrev_len = strlen(abbrev);
+
+ subject_len = find_commit_subject(out->message, &subject);
+
+ out->parent_label = xmalloc(strlen("parent of ") + abbrev_len +
+ strlen("... ") + subject_len + 1);
+ q = out->parent_label;
+ q = mempcpy(q, "parent of ", strlen("parent of "));
+ out->label = q;
+ q = mempcpy(q, abbrev, abbrev_len);
+ q = mempcpy(q, "... ", strlen("... "));
+ out->subject = q;
+ q = mempcpy(q, subject, subject_len);
+ *q = '\0';
+ return 0;
+}
+
+static void free_message(struct commit_message *msg)
+{
+ free(msg->parent_label);
+ free(msg->reencoded_message);
+}
+
+static char *get_encoding(const char *message)
+{
+ const char *p = message, *eol;
+
+ while (*p && *p != '\n') {
+ for (eol = p + 1; *eol && *eol != '\n'; eol++)
+ ; /* do nothing */
+ if (!prefixcmp(p, "encoding ")) {
+ char *result = xmalloc(eol - 8 - p);
+ strlcpy(result, p + 9, eol - 8 - p);
+ return result;
+ }
+ p = eol;
+ if (*p == '\n')
+ p++;
+ }
+ return NULL;
+}
+
+static void write_cherry_pick_head(struct commit *commit, const char *pseudoref)
+{
+ const char *filename;
+ int fd;
+ struct strbuf buf = STRBUF_INIT;
+
+ strbuf_addf(&buf, "%s\n", sha1_to_hex(commit->object.sha1));
+
+ filename = git_path("%s", pseudoref);
+ fd = open(filename, O_WRONLY | O_CREAT, 0666);
+ if (fd < 0)
+ die_errno(_("Could not open '%s' for writing"), filename);
+ if (write_in_full(fd, buf.buf, buf.len) != buf.len || close(fd))
+ die_errno(_("Could not write to '%s'"), filename);
+ strbuf_release(&buf);
+}
+
+static void print_advice(int show_hint)
+{
+ char *msg = getenv("GIT_CHERRY_PICK_HELP");
+
+ if (msg) {
+ fprintf(stderr, "%s\n", msg);
+ /*
+		 * A conflict has occurred but the porcelain
+		 * (typically rebase --interactive) wants to take care
+		 * of the commit itself, so remove CHERRY_PICK_HEAD.
+ */
+ unlink(git_path("CHERRY_PICK_HEAD"));
+ return;
+ }
+
+ if (show_hint)
+ advise(_("after resolving the conflicts, mark the corrected paths\n"
+ "with 'git add <paths>' or 'git rm <paths>'\n"
+ "and commit the result with 'git commit'"));
+}
+
+static void write_message(struct strbuf *msgbuf, const char *filename)
+{
+ static struct lock_file msg_file;
+
+ int msg_fd = hold_lock_file_for_update(&msg_file, filename,
+ LOCK_DIE_ON_ERROR);
+ if (write_in_full(msg_fd, msgbuf->buf, msgbuf->len) < 0)
+ die_errno(_("Could not write to %s"), filename);
+ strbuf_release(msgbuf);
+ if (commit_lock_file(&msg_file) < 0)
+ die(_("Error wrapping up %s"), filename);
+}
+
+static struct tree *empty_tree(void)
+{
+ return lookup_tree((const unsigned char *)EMPTY_TREE_SHA1_BIN);
+}
+
+static int error_dirty_index(struct replay_opts *opts)
+{
+ if (read_cache_unmerged())
+ return error_resolve_conflict(action_name(opts));
+
+ /* Different translation strings for cherry-pick and revert */
+ if (opts->action == REPLAY_PICK)
+ error(_("Your local changes would be overwritten by cherry-pick."));
+ else
+ error(_("Your local changes would be overwritten by revert."));
+
+ if (advice_commit_before_merge)
+ advise(_("Commit your changes or stash them to proceed."));
+ return -1;
+}
+
+static int fast_forward_to(const unsigned char *to, const unsigned char *from)
+{
+ struct ref_lock *ref_lock;
+
+ read_cache();
+ if (checkout_fast_forward(from, to))
+ exit(1); /* the callee should have complained already */
+ ref_lock = lock_any_ref_for_update("HEAD", from, 0);
+ return write_ref_sha1(ref_lock, to, "cherry-pick");
+}
+
+static int do_recursive_merge(struct commit *base, struct commit *next,
+ const char *base_label, const char *next_label,
+ unsigned char *head, struct strbuf *msgbuf,
+ struct replay_opts *opts)
+{
+ struct merge_options o;
+ struct tree *result, *next_tree, *base_tree, *head_tree;
+ int clean, index_fd;
+ const char **xopt;
+ static struct lock_file index_lock;
+
+ index_fd = hold_locked_index(&index_lock, 1);
+
+ read_cache();
+
+ init_merge_options(&o);
+ o.ancestor = base ? base_label : "(empty tree)";
+ o.branch1 = "HEAD";
+ o.branch2 = next ? next_label : "(empty tree)";
+
+ head_tree = parse_tree_indirect(head);
+ next_tree = next ? next->tree : empty_tree();
+ base_tree = base ? base->tree : empty_tree();
+
+ for (xopt = opts->xopts; xopt != opts->xopts + opts->xopts_nr; xopt++)
+ parse_merge_opt(&o, *xopt);
+
+ clean = merge_trees(&o,
+ head_tree,
+ next_tree, base_tree, &result);
+
+ if (active_cache_changed &&
+ (write_cache(index_fd, active_cache, active_nr) ||
+ commit_locked_index(&index_lock)))
+ /* TRANSLATORS: %s will be "revert" or "cherry-pick" */
+ die(_("%s: Unable to write new index file"), action_name(opts));
+ rollback_lock_file(&index_lock);
+
+ if (!clean) {
+ int i;
+ strbuf_addstr(msgbuf, "\nConflicts:\n\n");
+ for (i = 0; i < active_nr;) {
+ struct cache_entry *ce = active_cache[i++];
+ if (ce_stage(ce)) {
+ strbuf_addch(msgbuf, '\t');
+ strbuf_addstr(msgbuf, ce->name);
+ strbuf_addch(msgbuf, '\n');
+ while (i < active_nr && !strcmp(ce->name,
+ active_cache[i]->name))
+ i++;
+ }
+ }
+ }
+
+ return !clean;
+}
+
+/*
+ * If we are cherry-picking, and if the merge did not result in
+ * hand-editing, we will hit this commit and inherit the original
+ * author date and name.
+ * If we are reverting, or if our cherry-pick results in a hand
+ * merge, we had better say that the current user is responsible
+ * for that.
+ */
+static int run_git_commit(const char *defmsg, struct replay_opts *opts)
+{
+ /* 6 is max possible length of our args array including NULL */
+ const char *args[6];
+ int i = 0;
+
+ args[i++] = "commit";
+ args[i++] = "-n";
+ if (opts->signoff)
+ args[i++] = "-s";
+ if (!opts->edit) {
+ args[i++] = "-F";
+ args[i++] = defmsg;
+ }
+ args[i] = NULL;
+
+ return run_command_v_opt(args, RUN_GIT_CMD);
+}
+
+static int do_pick_commit(struct commit *commit, struct replay_opts *opts)
+{
+ unsigned char head[20];
+ struct commit *base, *next, *parent;
+ const char *base_label, *next_label;
+ struct commit_message msg = { NULL, NULL, NULL, NULL, NULL };
+ char *defmsg = NULL;
+ struct strbuf msgbuf = STRBUF_INIT;
+ int res;
+
+ if (opts->no_commit) {
+ /*
+ * We do not intend to commit immediately. We just want to
+ * merge the differences in, so let's compute the tree
+ * that represents the "current" state for merge-recursive
+ * to work on.
+ */
+ if (write_cache_as_tree(head, 0, NULL))
+ die (_("Your index file is unmerged."));
+ } else {
+ if (get_sha1("HEAD", head))
+ return error(_("You do not have a valid HEAD"));
+ if (index_differs_from("HEAD", 0))
+ return error_dirty_index(opts);
+ }
+ discard_cache();
+
+ if (!commit->parents) {
+ parent = NULL;
+ }
+ else if (commit->parents->next) {
+ /* Reverting or cherry-picking a merge commit */
+ int cnt;
+ struct commit_list *p;
+
+ if (!opts->mainline)
+ return error(_("Commit %s is a merge but no -m option was given."),
+ sha1_to_hex(commit->object.sha1));
+
+ for (cnt = 1, p = commit->parents;
+ cnt != opts->mainline && p;
+ cnt++)
+ p = p->next;
+ if (cnt != opts->mainline || !p)
+ return error(_("Commit %s does not have parent %d"),
+ sha1_to_hex(commit->object.sha1), opts->mainline);
+ parent = p->item;
+ } else if (0 < opts->mainline)
+ return error(_("Mainline was specified but commit %s is not a merge."),
+ sha1_to_hex(commit->object.sha1));
+ else
+ parent = commit->parents->item;
+
+ if (opts->allow_ff && parent && !hashcmp(parent->object.sha1, head))
+ return fast_forward_to(commit->object.sha1, head);
+
+ if (parent && parse_commit(parent) < 0)
+ /* TRANSLATORS: The first %s will be "revert" or
+ "cherry-pick", the second %s a SHA1 */
+ return error(_("%s: cannot parse parent commit %s"),
+ action_name(opts), sha1_to_hex(parent->object.sha1));
+
+ if (get_message(commit, &msg) != 0)
+ return error(_("Cannot get commit message for %s"),
+ sha1_to_hex(commit->object.sha1));
+
+ /*
+	 * "commit" is an existing commit.  We would want to apply
+	 * the difference it introduces since its first parent "prev"
+	 * on top of the current HEAD if we are cherry-picking, or the
+	 * reverse of it if we are reverting.
+ */
+
+ defmsg = git_pathdup("MERGE_MSG");
+
+ if (opts->action == REPLAY_REVERT) {
+ base = commit;
+ base_label = msg.label;
+ next = parent;
+ next_label = msg.parent_label;
+ strbuf_addstr(&msgbuf, "Revert \"");
+ strbuf_addstr(&msgbuf, msg.subject);
+ strbuf_addstr(&msgbuf, "\"\n\nThis reverts commit ");
+ strbuf_addstr(&msgbuf, sha1_to_hex(commit->object.sha1));
+
+ if (commit->parents && commit->parents->next) {
+ strbuf_addstr(&msgbuf, ", reversing\nchanges made to ");
+ strbuf_addstr(&msgbuf, sha1_to_hex(parent->object.sha1));
+ }
+ strbuf_addstr(&msgbuf, ".\n");
+ } else {
+ const char *p;
+
+ base = parent;
+ base_label = msg.parent_label;
+ next = commit;
+ next_label = msg.label;
+
+ /*
+ * Append the commit log message to msgbuf; it starts
+ * after the tree, parent, author, committer
+ * information followed by "\n\n".
+ */
+ p = strstr(msg.message, "\n\n");
+ if (p) {
+ p += 2;
+ strbuf_addstr(&msgbuf, p);
+ }
+
+ if (opts->record_origin) {
+ strbuf_addstr(&msgbuf, "(cherry picked from commit ");
+ strbuf_addstr(&msgbuf, sha1_to_hex(commit->object.sha1));
+ strbuf_addstr(&msgbuf, ")\n");
+ }
+ }
+
+ if (!opts->strategy || !strcmp(opts->strategy, "recursive") || opts->action == REPLAY_REVERT) {
+ res = do_recursive_merge(base, next, base_label, next_label,
+ head, &msgbuf, opts);
+ write_message(&msgbuf, defmsg);
+ } else {
+ struct commit_list *common = NULL;
+ struct commit_list *remotes = NULL;
+
+ write_message(&msgbuf, defmsg);
+
+ commit_list_insert(base, &common);
+ commit_list_insert(next, &remotes);
+ res = try_merge_command(opts->strategy, opts->xopts_nr, opts->xopts,
+ common, sha1_to_hex(head), remotes);
+ free_commit_list(common);
+ free_commit_list(remotes);
+ }
+
+ /*
+ * If the merge was clean or if it failed due to conflict, we write
+ * CHERRY_PICK_HEAD for the subsequent invocation of commit to use.
+ * However, if the merge did not even start, then we don't want to
+ * write it at all.
+ */
+ if (opts->action == REPLAY_PICK && !opts->no_commit && (res == 0 || res == 1))
+ write_cherry_pick_head(commit, "CHERRY_PICK_HEAD");
+ if (opts->action == REPLAY_REVERT && ((opts->no_commit && res == 0) || res == 1))
+ write_cherry_pick_head(commit, "REVERT_HEAD");
+
+ if (res) {
+ error(opts->action == REPLAY_REVERT
+ ? _("could not revert %s... %s")
+ : _("could not apply %s... %s"),
+ find_unique_abbrev(commit->object.sha1, DEFAULT_ABBREV),
+ msg.subject);
+ print_advice(res == 1);
+ rerere(opts->allow_rerere_auto);
+ } else {
+ if (!opts->no_commit)
+ res = run_git_commit(defmsg, opts);
+ }
+
+ free_message(&msg);
+ free(defmsg);
+
+ return res;
+}
+
+static void prepare_revs(struct replay_opts *opts)
+{
+ if (opts->action != REPLAY_REVERT)
+ opts->revs->reverse ^= 1;
+
+ if (prepare_revision_walk(opts->revs))
+ die(_("revision walk setup failed"));
+
+ if (!opts->revs->commits)
+ die(_("empty commit set passed"));
+}
+
+static void read_and_refresh_cache(struct replay_opts *opts)
+{
+ static struct lock_file index_lock;
+ int index_fd = hold_locked_index(&index_lock, 0);
+ if (read_index_preload(&the_index, NULL) < 0)
+ die(_("git %s: failed to read the index"), action_name(opts));
+ refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL);
+ if (the_index.cache_changed) {
+ if (write_index(&the_index, index_fd) ||
+ commit_locked_index(&index_lock))
+ die(_("git %s: failed to refresh the index"), action_name(opts));
+ }
+ rollback_lock_file(&index_lock);
+}
+
+/*
+ * Append a commit to the end of the commit_list.
+ *
+ * next starts by pointing to the variable that holds the head of an
+ * empty commit_list, and is updated to point to the "next" field of
+ * the last item on the list as new commits are appended.
+ *
+ * Usage example:
+ *
+ * struct commit_list *list;
+ * struct commit_list **next = &list;
+ *
+ * next = commit_list_append(c1, next);
+ * next = commit_list_append(c2, next);
+ * assert(commit_list_count(list) == 2);
+ * return list;
+ */
+static struct commit_list **commit_list_append(struct commit *commit,
+ struct commit_list **next)
+{
+ struct commit_list *new = xmalloc(sizeof(struct commit_list));
+ new->item = commit;
+ *next = new;
+ new->next = NULL;
+ return &new->next;
+}
+
+static int format_todo(struct strbuf *buf, struct commit_list *todo_list,
+ struct replay_opts *opts)
+{
+ struct commit_list *cur = NULL;
+ const char *sha1_abbrev = NULL;
+ const char *action_str = opts->action == REPLAY_REVERT ? "revert" : "pick";
+ const char *subject;
+ int subject_len;
+
+ for (cur = todo_list; cur; cur = cur->next) {
+ sha1_abbrev = find_unique_abbrev(cur->item->object.sha1, DEFAULT_ABBREV);
+ subject_len = find_commit_subject(cur->item->buffer, &subject);
+ strbuf_addf(buf, "%s %s %.*s\n", action_str, sha1_abbrev,
+ subject_len, subject);
+ }
+ return 0;
+}
+
+static struct commit *parse_insn_line(char *bol, char *eol, struct replay_opts *opts)
+{
+ unsigned char commit_sha1[20];
+ enum replay_action action;
+ char *end_of_object_name;
+ int saved, status, padding;
+
+ if (!prefixcmp(bol, "pick")) {
+ action = REPLAY_PICK;
+ bol += strlen("pick");
+ } else if (!prefixcmp(bol, "revert")) {
+ action = REPLAY_REVERT;
+ bol += strlen("revert");
+ } else
+ return NULL;
+
+	/* Eat up extra spaces/tabs before object name */
+ padding = strspn(bol, " \t");
+ if (!padding)
+ return NULL;
+ bol += padding;
+
+ end_of_object_name = bol + strcspn(bol, " \t\n");
+ saved = *end_of_object_name;
+ *end_of_object_name = '\0';
+ status = get_sha1(bol, commit_sha1);
+ *end_of_object_name = saved;
+
+ /*
+ * Verify that the action matches up with the one in
+ * opts; we don't support arbitrary instructions
+ */
+ if (action != opts->action) {
+ const char *action_str;
+ action_str = action == REPLAY_REVERT ? "revert" : "cherry-pick";
+ error(_("Cannot %s during a %s"), action_str, action_name(opts));
+ return NULL;
+ }
+
+ if (status < 0)
+ return NULL;
+
+ return lookup_commit_reference(commit_sha1);
+}
+
+static int parse_insn_buffer(char *buf, struct commit_list **todo_list,
+ struct replay_opts *opts)
+{
+ struct commit_list **next = todo_list;
+ struct commit *commit;
+ char *p = buf;
+ int i;
+
+ for (i = 1; *p; i++) {
+ char *eol = strchrnul(p, '\n');
+ commit = parse_insn_line(p, eol, opts);
+ if (!commit)
+ return error(_("Could not parse line %d."), i);
+ next = commit_list_append(commit, next);
+ p = *eol ? eol + 1 : eol;
+ }
+ if (!*todo_list)
+ return error(_("No commits parsed."));
+ return 0;
+}
+
+static void read_populate_todo(struct commit_list **todo_list,
+ struct replay_opts *opts)
+{
+ const char *todo_file = git_path(SEQ_TODO_FILE);
+ struct strbuf buf = STRBUF_INIT;
+ int fd, res;
+
+ fd = open(todo_file, O_RDONLY);
+ if (fd < 0)
+ die_errno(_("Could not open %s"), todo_file);
+ if (strbuf_read(&buf, fd, 0) < 0) {
+ close(fd);
+ strbuf_release(&buf);
+ die(_("Could not read %s."), todo_file);
+ }
+ close(fd);
+
+ res = parse_insn_buffer(buf.buf, todo_list, opts);
+ strbuf_release(&buf);
+ if (res)
+ die(_("Unusable instruction sheet: %s"), todo_file);
+}
+
+static int populate_opts_cb(const char *key, const char *value, void *data)
+{
+ struct replay_opts *opts = data;
+ int error_flag = 1;
+
+ if (!value)
+ error_flag = 0;
+ else if (!strcmp(key, "options.no-commit"))
+ opts->no_commit = git_config_bool_or_int(key, value, &error_flag);
+ else if (!strcmp(key, "options.edit"))
+ opts->edit = git_config_bool_or_int(key, value, &error_flag);
+ else if (!strcmp(key, "options.signoff"))
+ opts->signoff = git_config_bool_or_int(key, value, &error_flag);
+ else if (!strcmp(key, "options.record-origin"))
+ opts->record_origin = git_config_bool_or_int(key, value, &error_flag);
+ else if (!strcmp(key, "options.allow-ff"))
+ opts->allow_ff = git_config_bool_or_int(key, value, &error_flag);
+ else if (!strcmp(key, "options.mainline"))
+ opts->mainline = git_config_int(key, value);
+ else if (!strcmp(key, "options.strategy"))
+ git_config_string(&opts->strategy, key, value);
+ else if (!strcmp(key, "options.strategy-option")) {
+ ALLOC_GROW(opts->xopts, opts->xopts_nr + 1, opts->xopts_alloc);
+ opts->xopts[opts->xopts_nr++] = xstrdup(value);
+ } else
+ return error(_("Invalid key: %s"), key);
+
+ if (!error_flag)
+ return error(_("Invalid value for %s: %s"), key, value);
+
+ return 0;
+}
+
+static void read_populate_opts(struct replay_opts **opts_ptr)
+{
+ const char *opts_file = git_path(SEQ_OPTS_FILE);
+
+ if (!file_exists(opts_file))
+ return;
+ if (git_config_from_file(populate_opts_cb, opts_file, *opts_ptr) < 0)
+ die(_("Malformed options sheet: %s"), opts_file);
+}
+
+static void walk_revs_populate_todo(struct commit_list **todo_list,
+ struct replay_opts *opts)
+{
+ struct commit *commit;
+ struct commit_list **next;
+
+ prepare_revs(opts);
+
+ next = todo_list;
+ while ((commit = get_revision(opts->revs)))
+ next = commit_list_append(commit, next);
+}
+
+static int create_seq_dir(void)
+{
+ const char *seq_dir = git_path(SEQ_DIR);
+
+ if (file_exists(seq_dir)) {
+ error(_("a cherry-pick or revert is already in progress"));
+ advise(_("try \"git cherry-pick (--continue | --quit | --abort)\""));
+ return -1;
+ }
+ else if (mkdir(seq_dir, 0777) < 0)
+ die_errno(_("Could not create sequencer directory %s"), seq_dir);
+ return 0;
+}
+
+static void save_head(const char *head)
+{
+ const char *head_file = git_path(SEQ_HEAD_FILE);
+ static struct lock_file head_lock;
+ struct strbuf buf = STRBUF_INIT;
+ int fd;
+
+ fd = hold_lock_file_for_update(&head_lock, head_file, LOCK_DIE_ON_ERROR);
+ strbuf_addf(&buf, "%s\n", head);
+ if (write_in_full(fd, buf.buf, buf.len) < 0)
+ die_errno(_("Could not write to %s"), head_file);
+ if (commit_lock_file(&head_lock) < 0)
+ die(_("Error wrapping up %s."), head_file);
+}
+
+static int reset_for_rollback(const unsigned char *sha1)
+{
+ const char *argv[4]; /* reset --merge <arg> + NULL */
+ argv[0] = "reset";
+ argv[1] = "--merge";
+ argv[2] = sha1_to_hex(sha1);
+ argv[3] = NULL;
+ return run_command_v_opt(argv, RUN_GIT_CMD);
+}
+
+static int rollback_single_pick(void)
+{
+ unsigned char head_sha1[20];
+
+ if (!file_exists(git_path("CHERRY_PICK_HEAD")) &&
+ !file_exists(git_path("REVERT_HEAD")))
+ return error(_("no cherry-pick or revert in progress"));
+ if (read_ref_full("HEAD", head_sha1, 0, NULL))
+ return error(_("cannot resolve HEAD"));
+ if (is_null_sha1(head_sha1))
+ return error(_("cannot abort from a branch yet to be born"));
+ return reset_for_rollback(head_sha1);
+}
+
+static int sequencer_rollback(struct replay_opts *opts)
+{
+ const char *filename;
+ FILE *f;
+ unsigned char sha1[20];
+ struct strbuf buf = STRBUF_INIT;
+
+ filename = git_path(SEQ_HEAD_FILE);
+ f = fopen(filename, "r");
+ if (!f && errno == ENOENT) {
+ /*
+ * There is no multiple-cherry-pick in progress.
+ * If CHERRY_PICK_HEAD or REVERT_HEAD indicates
+ * a single-cherry-pick in progress, abort that.
+ */
+ return rollback_single_pick();
+ }
+ if (!f)
+ return error(_("cannot open %s: %s"), filename,
+ strerror(errno));
+ if (strbuf_getline(&buf, f, '\n')) {
+ error(_("cannot read %s: %s"), filename, ferror(f) ?
+ strerror(errno) : _("unexpected end of file"));
+ fclose(f);
+ goto fail;
+ }
+ fclose(f);
+ if (get_sha1_hex(buf.buf, sha1) || buf.buf[40] != '\0') {
+ error(_("stored pre-cherry-pick HEAD file '%s' is corrupt"),
+ filename);
+ goto fail;
+ }
+ if (reset_for_rollback(sha1))
+ goto fail;
+ remove_sequencer_state();
+ strbuf_release(&buf);
+ return 0;
+fail:
+ strbuf_release(&buf);
+ return -1;
+}
+
+static void save_todo(struct commit_list *todo_list, struct replay_opts *opts)
+{
+ const char *todo_file = git_path(SEQ_TODO_FILE);
+ static struct lock_file todo_lock;
+ struct strbuf buf = STRBUF_INIT;
+ int fd;
+
+ fd = hold_lock_file_for_update(&todo_lock, todo_file, LOCK_DIE_ON_ERROR);
+ if (format_todo(&buf, todo_list, opts) < 0)
+ die(_("Could not format %s."), todo_file);
+ if (write_in_full(fd, buf.buf, buf.len) < 0) {
+ strbuf_release(&buf);
+ die_errno(_("Could not write to %s"), todo_file);
+ }
+ if (commit_lock_file(&todo_lock) < 0) {
+ strbuf_release(&buf);
+ die(_("Error wrapping up %s."), todo_file);
+ }
+ strbuf_release(&buf);
+}
+
+static void save_opts(struct replay_opts *opts)
+{
+ const char *opts_file = git_path(SEQ_OPTS_FILE);
+
+ if (opts->no_commit)
+ git_config_set_in_file(opts_file, "options.no-commit", "true");
+ if (opts->edit)
+ git_config_set_in_file(opts_file, "options.edit", "true");
+ if (opts->signoff)
+ git_config_set_in_file(opts_file, "options.signoff", "true");
+ if (opts->record_origin)
+ git_config_set_in_file(opts_file, "options.record-origin", "true");
+ if (opts->allow_ff)
+ git_config_set_in_file(opts_file, "options.allow-ff", "true");
+ if (opts->mainline) {
+ struct strbuf buf = STRBUF_INIT;
+ strbuf_addf(&buf, "%d", opts->mainline);
+ git_config_set_in_file(opts_file, "options.mainline", buf.buf);
+ strbuf_release(&buf);
+ }
+ if (opts->strategy)
+ git_config_set_in_file(opts_file, "options.strategy", opts->strategy);
+ if (opts->xopts) {
+ int i;
+ for (i = 0; i < opts->xopts_nr; i++)
+ git_config_set_multivar_in_file(opts_file,
+ "options.strategy-option",
+ opts->xopts[i], "^$", 0);
+ }
+}
+
+static int pick_commits(struct commit_list *todo_list, struct replay_opts *opts)
+{
+ struct commit_list *cur;
+ int res;
+
+ setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
+ if (opts->allow_ff)
+ assert(!(opts->signoff || opts->no_commit ||
+ opts->record_origin || opts->edit));
+ read_and_refresh_cache(opts);
+
+ for (cur = todo_list; cur; cur = cur->next) {
+ save_todo(cur, opts);
+ res = do_pick_commit(cur->item, opts);
+ if (res)
+ return res;
+ }
+
+ /*
+	 * Sequence of picks finished successfully; clean up by
+	 * removing the .git/sequencer directory.
+ */
+ remove_sequencer_state();
+ return 0;
+}
+
+static int continue_single_pick(void)
+{
+ const char *argv[] = { "commit", NULL };
+
+ if (!file_exists(git_path("CHERRY_PICK_HEAD")) &&
+ !file_exists(git_path("REVERT_HEAD")))
+ return error(_("no cherry-pick or revert in progress"));
+ return run_command_v_opt(argv, RUN_GIT_CMD);
+}
+
+static int sequencer_continue(struct replay_opts *opts)
+{
+ struct commit_list *todo_list = NULL;
+
+ if (!file_exists(git_path(SEQ_TODO_FILE)))
+ return continue_single_pick();
+ read_populate_opts(&opts);
+ read_populate_todo(&todo_list, opts);
+
+ /* Verify that the conflict has been resolved */
+ if (file_exists(git_path("CHERRY_PICK_HEAD")) ||
+ file_exists(git_path("REVERT_HEAD"))) {
+ int ret = continue_single_pick();
+ if (ret)
+ return ret;
+ }
+ if (index_differs_from("HEAD", 0))
+ return error_dirty_index(opts);
+ todo_list = todo_list->next;
+ return pick_commits(todo_list, opts);
+}
+
+static int single_pick(struct commit *cmit, struct replay_opts *opts)
+{
+ setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
+ return do_pick_commit(cmit, opts);
+}
+
+int sequencer_pick_revisions(struct replay_opts *opts)
+{
+ struct commit_list *todo_list = NULL;
+ unsigned char sha1[20];
+
+ if (opts->subcommand == REPLAY_NONE)
+ assert(opts->revs);
+
+ read_and_refresh_cache(opts);
+
+ /*
+ * Decide what to do depending on the arguments; a fresh
+ * cherry-pick should be handled differently from an existing
+ * one that is being continued
+ */
+ if (opts->subcommand == REPLAY_REMOVE_STATE) {
+ remove_sequencer_state();
+ return 0;
+ }
+ if (opts->subcommand == REPLAY_ROLLBACK)
+ return sequencer_rollback(opts);
+ if (opts->subcommand == REPLAY_CONTINUE)
+ return sequencer_continue(opts);
+
+ /*
+ * If we were called as "git cherry-pick <commit>", just
+ * cherry-pick/revert it, set CHERRY_PICK_HEAD /
+ * REVERT_HEAD, and don't touch the sequencer state.
+ * This means it is possible to cherry-pick in the middle
+ * of a cherry-pick sequence.
+ */
+ if (opts->revs->cmdline.nr == 1 &&
+ opts->revs->cmdline.rev->whence == REV_CMD_REV &&
+ opts->revs->no_walk &&
+ !opts->revs->cmdline.rev->flags) {
+ struct commit *cmit;
+ if (prepare_revision_walk(opts->revs))
+ die(_("revision walk setup failed"));
+ cmit = get_revision(opts->revs);
+ if (!cmit || get_revision(opts->revs))
+ die("BUG: expected exactly one commit from walk");
+ return single_pick(cmit, opts);
+ }
+
+ /*
+	 * Start a new cherry-pick/revert sequence; but
+ * first, make sure that an existing one isn't in
+ * progress
+ */
+
+ walk_revs_populate_todo(&todo_list, opts);
+ if (create_seq_dir() < 0)
+ return -1;
+ if (get_sha1("HEAD", sha1)) {
+ if (opts->action == REPLAY_REVERT)
+ return error(_("Can't revert as initial commit"));
+ return error(_("Can't cherry-pick into empty head"));
+ }
+ save_head(sha1_to_hex(sha1));
+ save_opts(opts);
+ return pick_commits(todo_list, opts);
+}
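format_todo() and parse_insn_line() agree on a one-instruction-per-line sheet in .git/sequencer/todo, save_head() records the pre-pick HEAD, and save_opts() writes the replay options through the config machinery into .git/sequencer/opts. An illustrative example of the resulting on-disk state after a multi-commit cherry-pick with --signoff and a strategy option; object names are made up, and the "# ..." lines below are annotations naming each file, not part of its contents:

    # .git/sequencer/head
    0123456789abcdef0123456789abcdef01234567

    # .git/sequencer/todo
    pick 1234abc First commit subject
    pick 5678def Second commit subject

    # .git/sequencer/opts
    [options]
        signoff = true
        strategy-option = patience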
diff --git a/sequencer.h b/sequencer.h
index 2d4528f292..bb4b13830e 100644
--- a/sequencer.h
+++ b/sequencer.h
@@ -6,7 +6,44 @@
#define SEQ_TODO_FILE "sequencer/todo"
#define SEQ_OPTS_FILE "sequencer/opts"
+enum replay_action {
+ REPLAY_REVERT,
+ REPLAY_PICK
+};
+
+enum replay_subcommand {
+ REPLAY_NONE,
+ REPLAY_REMOVE_STATE,
+ REPLAY_CONTINUE,
+ REPLAY_ROLLBACK
+};
+
+struct replay_opts {
+ enum replay_action action;
+ enum replay_subcommand subcommand;
+
+ /* Boolean options */
+ int edit;
+ int record_origin;
+ int no_commit;
+ int signoff;
+ int allow_ff;
+ int allow_rerere_auto;
+
+ int mainline;
+
+ /* Merge strategy */
+ const char *strategy;
+ const char **xopts;
+ size_t xopts_nr, xopts_alloc;
+
+ /* Only used by REPLAY_NONE */
+ struct rev_info *revs;
+};
+
/* Removes SEQ_DIR. */
extern void remove_sequencer_state(void);
+int sequencer_pick_revisions(struct replay_opts *opts);
+
#endif
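sequencer_pick_revisions() is the single entry point a porcelain drives by filling in a struct replay_opts; builtin/revert.c, reworked in this same series, is the intended caller. A hedged, minimal sketch of what resuming an interrupted cherry-pick might look like from the API's point of view, ignoring all option parsing:

    #include "cache.h"
    #include "sequencer.h"

    static int resume_cherry_pick(void)
    {
        struct replay_opts opts;

        memset(&opts, 0, sizeof(opts));
        opts.action = REPLAY_PICK;            /* cherry-picking, not reverting */
        opts.subcommand = REPLAY_CONTINUE;    /* i.e. "git cherry-pick --continue" */
        return sequencer_pick_revisions(&opts);
    }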
diff --git a/sha1_file.c b/sha1_file.c
index 88f2151ff3..d9aa0e0a2c 100644
--- a/sha1_file.c
+++ b/sha1_file.c
@@ -1202,6 +1202,11 @@ void *map_sha1_file(const unsigned char *sha1, unsigned long *size)
if (!fstat(fd, &st)) {
*size = xsize_t(st.st_size);
+ if (!*size) {
+ /* mmap() is forbidden on empty files */
+ error("object file %s is empty", sha1_file_name(sha1));
+ return NULL;
+ }
map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
}
close(fd);
diff --git a/t/Makefile b/t/Makefile
index 9046ec9816..66ceefefcc 100644
--- a/t/Makefile
+++ b/t/Makefile
@@ -17,9 +17,9 @@ DEFAULT_TEST_TARGET ?= test
# Shell quote;
SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
-T = $(wildcard t[0-9][0-9][0-9][0-9]-*.sh)
-TSVN = $(wildcard t91[0-9][0-9]-*.sh)
-TGITWEB = $(wildcard t95[0-9][0-9]-*.sh)
+T = $(sort $(wildcard t[0-9][0-9][0-9][0-9]-*.sh))
+TSVN = $(sort $(wildcard t91[0-9][0-9]-*.sh))
+TGITWEB = $(sort $(wildcard t95[0-9][0-9]-*.sh))
all: $(DEFAULT_TEST_TARGET)
diff --git a/t/README b/t/README
index c85abaffb3..c09c582c16 100644
--- a/t/README
+++ b/t/README
@@ -548,6 +548,19 @@ library for your script to use.
...
'
+ - test_pause
+
+ This command is useful for writing and debugging tests and must be
+ removed before submitting. It halts the execution of the test and
+ spawns a shell in the trash directory. Exit the shell to continue
+ the test. Example:
+
+ test_expect_success 'test' '
+ git do-something >actual &&
+ test_pause &&
+ test_cmp expected actual
+ '
+
Prerequisites
-------------
diff --git a/t/lib-git-daemon.sh b/t/lib-git-daemon.sh
new file mode 100644
index 0000000000..ef2d01f369
--- /dev/null
+++ b/t/lib-git-daemon.sh
@@ -0,0 +1,69 @@
+#!/bin/sh
+
+if test -z "$GIT_TEST_GIT_DAEMON"
+then
+ skip_all="git-daemon testing disabled (define GIT_TEST_GIT_DAEMON to enable)"
+ test_done
+fi
+
+LIB_GIT_DAEMON_PORT=${LIB_GIT_DAEMON_PORT-'8121'}
+
+GIT_DAEMON_PID=
+GIT_DAEMON_DOCUMENT_ROOT_PATH="$PWD"/repo
+GIT_DAEMON_URL=git://127.0.0.1:$LIB_GIT_DAEMON_PORT
+
+start_git_daemon() {
+ if test -n "$GIT_DAEMON_PID"
+ then
+ error "start_git_daemon already called"
+ fi
+
+ mkdir -p "$GIT_DAEMON_DOCUMENT_ROOT_PATH"
+
+ trap 'code=$?; stop_git_daemon; (exit $code); die' EXIT
+
+ say >&3 "Starting git daemon ..."
+ mkfifo git_daemon_output
+ git daemon --listen=127.0.0.1 --port="$LIB_GIT_DAEMON_PORT" \
+ --reuseaddr --verbose \
+ --base-path="$GIT_DAEMON_DOCUMENT_ROOT_PATH" \
+ "$@" "$GIT_DAEMON_DOCUMENT_ROOT_PATH" \
+ >&3 2>git_daemon_output &
+ GIT_DAEMON_PID=$!
+ {
+ read line
+ echo >&4 "$line"
+ cat >&4 &
+
+ # Check expected output
+ if test x"$(expr "$line" : "\[[0-9]*\] \(.*\)")" != x"Ready to rumble"
+ then
+ kill "$GIT_DAEMON_PID"
+ wait "$GIT_DAEMON_PID"
+ trap 'die' EXIT
+ error "git daemon failed to start"
+ fi
+ } <git_daemon_output
+}
+
+stop_git_daemon() {
+ if test -z "$GIT_DAEMON_PID"
+ then
+ return
+ fi
+
+ trap 'die' EXIT
+
+ # kill git-daemon child of git
+ say >&3 "Stopping git daemon ..."
+ kill "$GIT_DAEMON_PID"
+ wait "$GIT_DAEMON_PID" >&3 2>&4
+ ret=$?
+ # expect exit with status 143 = 128+15 for signal TERM=15
+ if test $ret -ne 143
+ then
+ error "git daemon exited with status: $ret"
+ fi
+ GIT_DAEMON_PID=
+ rm -f git_daemon_output
+}
diff --git a/t/t0080-vcs-svn.sh b/t/t0080-vcs-svn.sh
deleted file mode 100755
index 99a314b080..0000000000
--- a/t/t0080-vcs-svn.sh
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/bin/sh
-
-test_description='check infrastructure for svn importer'
-
-. ./test-lib.sh
-uint32_max=4294967295
-
-test_expect_success 'obj pool: store data' '
- cat <<-\EOF >expected &&
- 0
- 1
- EOF
-
- test-obj-pool <<-\EOF >actual &&
- alloc one 16
- set one 13
- test one 13
- reset one
- EOF
- test_cmp expected actual
-'
-
-test_expect_success 'obj pool: NULL is offset ~0' '
- echo "$uint32_max" >expected &&
- echo null one | test-obj-pool >actual &&
- test_cmp expected actual
-'
-
-test_expect_success 'obj pool: out-of-bounds access' '
- cat <<-EOF >expected &&
- 0
- 0
- $uint32_max
- $uint32_max
- 16
- 20
- $uint32_max
- EOF
-
- test-obj-pool <<-\EOF >actual &&
- alloc one 16
- alloc two 16
- offset one 20
- offset two 20
- alloc one 5
- offset one 20
- free one 1
- offset one 20
- reset one
- reset two
- EOF
- test_cmp expected actual
-'
-
-test_expect_success 'obj pool: high-water mark' '
- cat <<-\EOF >expected &&
- 0
- 0
- 10
- 20
- 20
- 20
- EOF
-
- test-obj-pool <<-\EOF >actual &&
- alloc one 10
- committed one
- alloc one 10
- commit one
- committed one
- alloc one 10
- free one 20
- committed one
- reset one
- EOF
- test_cmp expected actual
-'
-
-test_expect_success 'string pool' '
- echo a does not equal b >expected.differ &&
- echo a equals a >expected.match &&
- echo equals equals equals >expected.matchmore &&
-
- test-string-pool "a,--b" >actual.differ &&
- test-string-pool "a,a" >actual.match &&
- test-string-pool "equals-equals" >actual.matchmore &&
- test_must_fail test-string-pool a,a,a &&
- test_must_fail test-string-pool a &&
-
- test_cmp expected.differ actual.differ &&
- test_cmp expected.match actual.match &&
- test_cmp expected.matchmore actual.matchmore
-'
-
-test_expect_success 'treap sort' '
- cat <<-\EOF >unsorted &&
- 68
- 12
- 13
- 13
- 68
- 13
- 13
- 21
- 10
- 11
- 12
- 13
- 13
- EOF
- sort unsorted >expected &&
-
- test-treap <unsorted >actual &&
- test_cmp expected actual
-'
-
-test_done
diff --git a/t/t0300-credentials.sh b/t/t0300-credentials.sh
index 885af8fb62..8621ab036f 100755
--- a/t/t0300-credentials.sh
+++ b/t/t0300-credentials.sh
@@ -14,22 +14,18 @@ test_expect_success 'setup helper scripts' '
done
EOF
- cat >git-credential-useless <<-\EOF &&
- #!/bin/sh
+ write_script git-credential-useless <<-\EOF &&
. ./dump
exit 0
EOF
- chmod +x git-credential-useless &&
- cat >git-credential-verbatim <<-\EOF &&
- #!/bin/sh
+ write_script git-credential-verbatim <<-\EOF &&
user=$1; shift
pass=$1; shift
. ./dump
test -z "$user" || echo username=$user
test -z "$pass" || echo password=$pass
EOF
- chmod +x git-credential-verbatim &&
PATH="$PWD:$PATH"
'
diff --git a/t/t2203-add-intent.sh b/t/t2203-add-intent.sh
index 25435290a7..ec35409f9c 100755
--- a/t/t2203-add-intent.sh
+++ b/t/t2203-add-intent.sh
@@ -32,7 +32,7 @@ test_expect_success 'intent to add does not clobber existing paths' '
! grep "$empty" actual
'
-test_expect_success 'cannot commit with i-t-a entry' '
+test_expect_success 'i-t-a entry is simply ignored' '
test_tick &&
git commit -a -m initial &&
git reset --hard &&
@@ -41,12 +41,14 @@ test_expect_success 'cannot commit with i-t-a entry' '
echo frotz >nitfol &&
git add rezrov &&
git add -N nitfol &&
- test_must_fail git commit -m initial
+ git commit -m second &&
+ test $(git ls-tree HEAD -- nitfol | wc -l) = 0 &&
+ test $(git diff --name-only HEAD -- nitfol | wc -l) = 1
'
test_expect_success 'can commit with an unrelated i-t-a entry in index' '
git reset --hard &&
- echo xyzzy >rezrov &&
+ echo bozbar >rezrov &&
echo frotz >nitfol &&
git add rezrov &&
git add -N nitfol &&
diff --git a/t/t3200-branch.sh b/t/t3200-branch.sh
index ea82424e47..dd1acebd88 100755
--- a/t/t3200-branch.sh
+++ b/t/t3200-branch.sh
@@ -3,11 +3,8 @@
# Copyright (c) 2005 Amos Waterland
#
-test_description='git branch --foo should not create bogus branch
+test_description='assorted git branch tests'
-This test runs git branch --help and checks that the argument is properly
-handled. Specifically, that a bogus branch is not created.
-'
. ./test-lib.sh
test_expect_success \
@@ -620,4 +617,40 @@ test_expect_success 'use set-upstream on the current branch' '
'
+test_expect_success 'use --edit-description' '
+ write_script editor <<-\EOF &&
+ echo "New contents" >"$1"
+ EOF
+ EDITOR=./editor git branch --edit-description &&
+ write_script editor <<-\EOF &&
+ git stripspace -s <"$1" >"EDITOR_OUTPUT"
+ EOF
+ EDITOR=./editor git branch --edit-description &&
+ echo "New contents" >expect &&
+ test_cmp EDITOR_OUTPUT expect
+'
+
+test_expect_success 'detect typo in branch name when using --edit-description' '
+ write_script editor <<-\EOF &&
+ echo "New contents" >"$1"
+ EOF
+ (
+ EDITOR=./editor &&
+ export EDITOR &&
+ test_must_fail git branch --edit-description no-such-branch
+ )
+'
+
+test_expect_success 'refuse --edit-description on unborn branch for now' '
+ write_script editor <<-\EOF &&
+ echo "New contents" >"$1"
+ EOF
+ git checkout --orphan unborn &&
+ (
+ EDITOR=./editor &&
+ export EDITOR &&
+ test_must_fail git branch --edit-description
+ )
+'
+
test_done
diff --git a/t/t3400-rebase.sh b/t/t3400-rebase.sh
index c355533236..e647272a01 100755
--- a/t/t3400-rebase.sh
+++ b/t/t3400-rebase.sh
@@ -218,4 +218,27 @@ test_expect_success 'rebase -m can copy notes' '
test "a note" = "$(git notes show HEAD)"
'
+test_expect_success 'rebase commit with an ancient timestamp' '
+ git reset --hard &&
+
+ >old.one && git add old.one && test_tick &&
+ git commit --date="@12345 +0400" -m "Old one" &&
+ >old.two && git add old.two && test_tick &&
+ git commit --date="@23456 +0500" -m "Old two" &&
+ >old.three && git add old.three && test_tick &&
+ git commit --date="@34567 +0600" -m "Old three" &&
+
+ git cat-file commit HEAD^^ >actual &&
+ grep "author .* 12345 +0400$" actual &&
+ git cat-file commit HEAD^ >actual &&
+ grep "author .* 23456 +0500$" actual &&
+ git cat-file commit HEAD >actual &&
+ grep "author .* 34567 +0600$" actual &&
+
+ git rebase --onto HEAD^^ HEAD^ &&
+
+ git cat-file commit HEAD >actual &&
+ grep "author .* 34567 +0600$" actual
+'
+
test_done
diff --git a/t/t4015-diff-whitespace.sh b/t/t4015-diff-whitespace.sh
index 9059bcd69e..cc3db1304e 100755
--- a/t/t4015-diff-whitespace.sh
+++ b/t/t4015-diff-whitespace.sh
@@ -103,7 +103,7 @@ test_expect_success 'another test, with -w --ignore-space-at-eol' 'test_cmp expe
git diff -w -b --ignore-space-at-eol > out
test_expect_success 'another test, with -w -b --ignore-space-at-eol' 'test_cmp expect out'
-tr 'Q' '\015' << EOF > expect
+tr 'Q_' '\015 ' << EOF > expect
diff --git a/x b/x
index d99af23..8b32fb5 100644
--- a/x
@@ -111,19 +111,19 @@ index d99af23..8b32fb5 100644
@@ -1,6 +1,6 @@
-whitespace at beginning
+ whitespace at beginning
- whitespace change
+ whitespace change
-whitespace in the middle
+white space in the middle
- whitespace at end
+ whitespace at end__
unchanged line
- CR at endQ
+ CR at end
EOF
git diff -b > out
test_expect_success 'another test, with -b' 'test_cmp expect out'
git diff -b --ignore-space-at-eol > out
test_expect_success 'another test, with -b --ignore-space-at-eol' 'test_cmp expect out'
-tr 'Q' '\015' << EOF > expect
+tr 'Q_' '\015 ' << EOF > expect
diff --git a/x b/x
index d99af23..8b32fb5 100644
--- a/x
@@ -135,9 +135,9 @@ index d99af23..8b32fb5 100644
+ whitespace at beginning
+whitespace change
+white space in the middle
- whitespace at end
+ whitespace at end__
unchanged line
- CR at endQ
+ CR at end
EOF
git diff --ignore-space-at-eol > out
test_expect_success 'another test, with --ignore-space-at-eol' 'test_cmp expect out'
diff --git a/t/t4150-am.sh b/t/t4150-am.sh
index d7d9ccc1c8..8807b602a5 100755
--- a/t/t4150-am.sh
+++ b/t/t4150-am.sh
@@ -237,7 +237,7 @@ test_expect_success 'am stays in branch' '
test_expect_success 'am --signoff does not add Signed-off-by: line if already there' '
git format-patch --stdout HEAD^ >patch3 &&
- sed -e "/^Subject/ s,\[PATCH,Re: Re: Re: & 1/5 v2," patch3 >patch4 &&
+ sed -e "/^Subject/ s,\[PATCH,Re: Re: Re: & 1/5 v2] [foo," patch3 >patch4 &&
rm -fr .git/rebase-apply &&
git reset --hard &&
git checkout HEAD^ &&
@@ -259,7 +259,17 @@ test_expect_success 'am --keep really keeps the subject' '
git am --keep patch4 &&
! test -d .git/rebase-apply &&
git cat-file commit HEAD >actual &&
- grep "Re: Re: Re: \[PATCH 1/5 v2\] third" actual
+ grep "Re: Re: Re: \[PATCH 1/5 v2\] \[foo\] third" actual
+'
+
+test_expect_success 'am --keep-non-patch really keeps the non-patch part' '
+ rm -fr .git/rebase-apply &&
+ git reset --hard &&
+ git checkout HEAD^ &&
+ git am --keep-non-patch patch4 &&
+ ! test -d .git/rebase-apply &&
+ git cat-file commit HEAD >actual &&
+ grep "^\[foo\] third" actual
'
test_expect_success 'am -3 falls back to 3-way merge' '
diff --git a/t/t5150-request-pull.sh b/t/t5150-request-pull.sh
index da25bc2d1f..7c1dc641de 100755
--- a/t/t5150-request-pull.sh
+++ b/t/t5150-request-pull.sh
@@ -179,11 +179,7 @@ test_expect_success 'request names an appropriate branch' '
read repository &&
read branch
} <digest &&
- {
- test "$branch" = full ||
- test "$branch" = master ||
- test "$branch" = for-upstream
- }
+ test "$branch" = tags/full
'
diff --git a/t/t5500-fetch-pack.sh b/t/t5500-fetch-pack.sh
index 9bf69e9a0f..ce51692bb2 100755
--- a/t/t5500-fetch-pack.sh
+++ b/t/t5500-fetch-pack.sh
@@ -114,8 +114,19 @@ pull_to_client 2nd "refs/heads/B" $((64*3))
pull_to_client 3rd "refs/heads/A" $((1*3))
+test_expect_success 'single branch clone' '
+ git clone --single-branch "file://$(pwd)/." singlebranch
+'
+
+test_expect_success 'single branch object count' '
+ GIT_DIR=singlebranch/.git git count-objects -v |
+ grep "^in-pack:" > count.singlebranch &&
+ echo "in-pack: 198" >expected &&
+ test_cmp expected count.singlebranch
+'
+
test_expect_success 'clone shallow' '
- git clone --depth 2 "file://$(pwd)/." shallow
+ git clone --no-single-branch --depth 2 "file://$(pwd)/." shallow
'
test_expect_success 'clone shallow object count' '
@@ -248,4 +259,71 @@ test_expect_success 'clone shallow object count' '
grep "^count: 52" count.shallow
'
+test_expect_success 'clone shallow without --no-single-branch' '
+ git clone --depth 1 "file://$(pwd)/." shallow2
+'
+
+test_expect_success 'clone shallow object count (single-branch)' '
+ (
+ cd shallow2 &&
+ git count-objects -v
+ ) > count.shallow2 &&
+ grep "^in-pack: 6" count.shallow2
+'
+
+test_expect_success 'clone shallow with --branch' '
+ git clone --depth 1 --branch A "file://$(pwd)/." shallow3
+'
+
+test_expect_success 'clone shallow object count with --branch' '
+ echo "in-pack: 12" > count3.expected &&
+ GIT_DIR=shallow3/.git git count-objects -v |
+ grep "^in-pack" > count3.actual &&
+ test_cmp count3.expected count3.actual
+'
+
+test_expect_success 'clone shallow with detached HEAD' '
+ git checkout HEAD^ &&
+ git clone --depth 1 "file://$(pwd)/." shallow5 &&
+ git checkout - &&
+ GIT_DIR=shallow5/.git git rev-parse HEAD >actual &&
+ git rev-parse HEAD^ >expected &&
+ test_cmp expected actual
+'
+
+test_expect_success 'shallow clone pulling tags' '
+ git tag -a -m A TAGA1 A &&
+ git tag -a -m B TAGB1 B &&
+ git tag TAGA2 A &&
+ git tag TAGB2 B &&
+ git clone --depth 1 "file://$(pwd)/." shallow6 &&
+
+ cat >taglist.expected <<\EOF &&
+TAGB1
+TAGB2
+EOF
+ GIT_DIR=shallow6/.git git tag -l >taglist.actual &&
+ test_cmp taglist.expected taglist.actual &&
+
+ echo "in-pack: 7" > count6.expected &&
+ GIT_DIR=shallow6/.git git count-objects -v |
+ grep "^in-pack" > count6.actual &&
+ test_cmp count6.expected count6.actual
+'
+
+test_expect_success 'shallow cloning single tag' '
+ git clone --depth 1 --branch=TAGB1 "file://$(pwd)/." shallow7 &&
+ cat >taglist.expected <<\EOF &&
+TAGB1
+TAGB2
+EOF
+ GIT_DIR=shallow7/.git git tag -l >taglist.actual &&
+ test_cmp taglist.expected taglist.actual &&
+
+ echo "in-pack: 7" > count7.expected &&
+ GIT_DIR=shallow7/.git git count-objects -v |
+ grep "^in-pack" > count7.actual &&
+ test_cmp count7.expected count7.actual
+'
+
test_done
diff --git a/t/t5523-push-upstream.sh b/t/t5523-push-upstream.sh
index c229fe68f1..9ee52cfc45 100755
--- a/t/t5523-push-upstream.sh
+++ b/t/t5523-push-upstream.sh
@@ -108,4 +108,11 @@ test_expect_failure TTY 'push --no-progress suppresses progress' '
! grep "Writing objects" err
'
+test_expect_success TTY 'quiet push' '
+ ensure_fresh_upstream &&
+
+ test_terminal git push --quiet --no-progress upstream master 2>&1 | tee output &&
+ test_cmp /dev/null output
+'
+
test_done
diff --git a/t/t5541-http-push.sh b/t/t5541-http-push.sh
index 9b85d420c3..d66ed24508 100755
--- a/t/t5541-http-push.sh
+++ b/t/t5541-http-push.sh
@@ -14,6 +14,7 @@ fi
ROOT_PATH="$PWD"
LIB_HTTPD_PORT=${LIB_HTTPD_PORT-'5541'}
. "$TEST_DIRECTORY"/lib-httpd.sh
+. "$TEST_DIRECTORY"/lib-terminal.sh
start_httpd
test_expect_success 'setup remote repository' '
@@ -95,6 +96,32 @@ test_expect_success 'create and delete remote branch' '
test_must_fail git show-ref --verify refs/remotes/origin/dev
'
+cat >"$HTTPD_DOCUMENT_ROOT_PATH/test_repo.git/hooks/update" <<EOF
+#!/bin/sh
+exit 1
+EOF
+chmod a+x "$HTTPD_DOCUMENT_ROOT_PATH/test_repo.git/hooks/update"
+
+cat >exp <<EOF
+remote: error: hook declined to update refs/heads/dev2
+To http://127.0.0.1:$LIB_HTTPD_PORT/smart/test_repo.git
+ ! [remote rejected] dev2 -> dev2 (hook declined)
+error: failed to push some refs to 'http://127.0.0.1:$LIB_HTTPD_PORT/smart/test_repo.git'
+EOF
+
+test_expect_success 'rejected update prints status' '
+ cd "$ROOT_PATH"/test_repo_clone &&
+ git checkout -b dev2 &&
+ : >path4 &&
+ git add path4 &&
+ test_tick &&
+ git commit -m dev2 &&
+ test_must_fail git push origin dev2 2>act &&
+ sed -e "/^remote: /s/ *$//" <act >cmp &&
+ test_cmp exp cmp
+'
+rm -f "$HTTPD_DOCUMENT_ROOT_PATH/test_repo.git/hooks/update"
+
cat >exp <<EOF
GET /smart/test_repo.git/info/refs?service=git-upload-pack HTTP/1.1 200
@@ -106,6 +133,8 @@ GET /smart/test_repo.git/info/refs?service=git-receive-pack HTTP/1.1 200
POST /smart/test_repo.git/git-receive-pack HTTP/1.1 200
GET /smart/test_repo.git/info/refs?service=git-receive-pack HTTP/1.1 200
POST /smart/test_repo.git/git-receive-pack HTTP/1.1 200
+GET /smart/test_repo.git/info/refs?service=git-receive-pack HTTP/1.1 200
+POST /smart/test_repo.git/git-receive-pack HTTP/1.1 200
EOF
test_expect_success 'used receive-pack service' '
sed -e "
@@ -186,5 +215,12 @@ test_expect_success 'push --mirror to repo with alternates' '
git push --mirror "$HTTPD_URL"/smart/alternates-mirror.git
'
+test_expect_success TTY 'quiet push' '
+ cd "$ROOT_PATH"/test_repo_clone &&
+ test_commit quiet &&
+ test_terminal git push --quiet --no-progress 2>&1 | tee output &&
+ test_cmp /dev/null output
+'
+
stop_httpd
test_done
diff --git a/t/t5570-git-daemon.sh b/t/t5570-git-daemon.sh
new file mode 100755
index 0000000000..7cbc9994a3
--- /dev/null
+++ b/t/t5570-git-daemon.sh
@@ -0,0 +1,148 @@
+#!/bin/sh
+
+test_description='test fetching over git protocol'
+. ./test-lib.sh
+
+LIB_GIT_DAEMON_PORT=${LIB_GIT_DAEMON_PORT-5570}
+. "$TEST_DIRECTORY"/lib-git-daemon.sh
+start_git_daemon
+
+test_expect_success 'setup repository' '
+ echo content >file &&
+ git add file &&
+ git commit -m one
+'
+
+test_expect_success 'create git-accessible bare repository' '
+ mkdir "$GIT_DAEMON_DOCUMENT_ROOT_PATH/repo.git" &&
+ (cd "$GIT_DAEMON_DOCUMENT_ROOT_PATH/repo.git" &&
+ git --bare init &&
+ : >git-daemon-export-ok
+ ) &&
+ git remote add public "$GIT_DAEMON_DOCUMENT_ROOT_PATH/repo.git" &&
+ git push public master:master
+'
+
+test_expect_success 'clone git repository' '
+ git clone "$GIT_DAEMON_URL/repo.git" clone &&
+ test_cmp file clone/file
+'
+
+test_expect_success 'fetch changes via git protocol' '
+ echo content >>file &&
+ git commit -a -m two &&
+ git push public &&
+ (cd clone && git pull) &&
+ test_cmp file clone/file
+'
+
+test_expect_failure 'remote detects correct HEAD' '
+ git push public master:other &&
+ (cd clone &&
+ git remote set-head -d origin &&
+ git remote set-head -a origin &&
+ git symbolic-ref refs/remotes/origin/HEAD > output &&
+ echo refs/remotes/origin/master > expect &&
+ test_cmp expect output
+ )
+'
+
+test_expect_success 'prepare pack objects' '
+ cp -R "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo.git "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_pack.git &&
+ (cd "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_pack.git &&
+ git --bare repack -a -d
+ )
+'
+
+test_expect_success 'fetch notices corrupt pack' '
+ cp -R "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_pack.git "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_bad1.git &&
+ (cd "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_bad1.git &&
+ p=`ls objects/pack/pack-*.pack` &&
+ chmod u+w $p &&
+ printf %0256d 0 | dd of=$p bs=256 count=1 seek=1 conv=notrunc
+ ) &&
+ mkdir repo_bad1.git &&
+ (cd repo_bad1.git &&
+ git --bare init &&
+ test_must_fail git --bare fetch "$GIT_DAEMON_URL/repo_bad1.git" &&
+ test 0 = `ls objects/pack/pack-*.pack | wc -l`
+ )
+'
+
+test_expect_success 'fetch notices corrupt idx' '
+ cp -R "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_pack.git "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_bad2.git &&
+ (cd "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_bad2.git &&
+ p=`ls objects/pack/pack-*.idx` &&
+ chmod u+w $p &&
+ printf %0256d 0 | dd of=$p bs=256 count=1 seek=1 conv=notrunc
+ ) &&
+ mkdir repo_bad2.git &&
+ (cd repo_bad2.git &&
+ git --bare init &&
+ test_must_fail git --bare fetch "$GIT_DAEMON_URL/repo_bad2.git" &&
+ test 0 = `ls objects/pack | wc -l`
+ )
+'
+
+test_remote_error()
+{
+ do_export=YesPlease
+ while test $# -gt 0
+ do
+ case $1 in
+ -x)
+ shift
+ chmod -x "$GIT_DAEMON_DOCUMENT_ROOT_PATH/repo.git"
+ ;;
+ -n)
+ shift
+ do_export=
+ ;;
+ *)
+ break
+ esac
+ done
+
+ if test $# -ne 3
+ then
+ error "invalid number of arguments"
+ fi
+
+ cmd=$1
+ repo=$2
+ msg=$3
+
+ if test -x "$GIT_DAEMON_DOCUMENT_ROOT_PATH/$repo"
+ then
+ if test -n "$do_export"
+ then
+ : >"$GIT_DAEMON_DOCUMENT_ROOT_PATH/$repo/git-daemon-export-ok"
+ else
+ rm -f "$GIT_DAEMON_DOCUMENT_ROOT_PATH/$repo/git-daemon-export-ok"
+ fi
+ fi
+
+ test_must_fail git "$cmd" "$GIT_DAEMON_URL/$repo" 2>output &&
+ echo "fatal: remote error: $msg: /$repo" >expect &&
+ test_cmp expect output
+ ret=$?
+ chmod +x "$GIT_DAEMON_DOCUMENT_ROOT_PATH/repo.git"
+ (exit $ret)
+}
+
+msg="access denied or repository not exported"
+test_expect_success 'clone non-existent' "test_remote_error clone nowhere.git '$msg'"
+test_expect_success 'push disabled' "test_remote_error push repo.git '$msg'"
+test_expect_success 'read access denied' "test_remote_error -x fetch repo.git '$msg'"
+test_expect_success 'not exported' "test_remote_error -n fetch repo.git '$msg'"
+
+stop_git_daemon
+start_git_daemon --informative-errors
+
+test_expect_success 'clone non-existent' "test_remote_error clone nowhere.git 'no such repository'"
+test_expect_success 'push disabled' "test_remote_error push repo.git 'service not enabled'"
+test_expect_success 'read access denied' "test_remote_error -x fetch repo.git 'no such repository'"
+test_expect_success 'not exported' "test_remote_error -n fetch repo.git 'repository not exported'"
+
+stop_git_daemon
+test_done
diff --git a/t/t5601-clone.sh b/t/t5601-clone.sh
index 87ee01662c..67869b4813 100755
--- a/t/t5601-clone.sh
+++ b/t/t5601-clone.sh
@@ -9,10 +9,13 @@ test_expect_success setup '
rm -fr .git &&
test_create_repo src &&
(
- cd src
- >file
- git add file
- git commit -m initial
+ cd src &&
+ >file &&
+ git add file &&
+ git commit -m initial &&
+ echo 1 >file &&
+ git add file &&
+ git commit -m updated
)
'
@@ -88,6 +91,26 @@ test_expect_success 'clone --mirror' '
'
+test_expect_success 'clone --mirror with detached HEAD' '
+
+ ( cd src && git checkout HEAD^ && git rev-parse HEAD >../expected ) &&
+ git clone --mirror src mirror.detached &&
+ ( cd src && git checkout - ) &&
+ GIT_DIR=mirror.detached git rev-parse HEAD >actual &&
+ test_cmp expected actual
+
+'
+
+test_expect_success 'clone --bare with detached HEAD' '
+
+ ( cd src && git checkout HEAD^ && git rev-parse HEAD >../expected ) &&
+ git clone --bare src bare.detached &&
+ ( cd src && git checkout - ) &&
+ GIT_DIR=bare.detached git rev-parse HEAD >actual &&
+ test_cmp expected actual
+
+'
+
test_expect_success 'clone --bare names the local repository <name>.git' '
git clone --bare src &&
@@ -248,4 +271,13 @@ test_expect_success 'clone from original with relative alternate' '
grep /src/\\.git/objects target-10/objects/info/alternates
'
+test_expect_success 'clone checking out a tag' '
+ git clone --branch=some-tag src dst.tag &&
+ GIT_DIR=src/.git git rev-parse some-tag >expected &&
+ test_cmp expected dst.tag/.git/HEAD &&
+ GIT_DIR=dst.tag/.git git config remote.origin.fetch >fetch.actual &&
+ echo "+refs/heads/*:refs/remotes/origin/*" >fetch.expected &&
+ test_cmp fetch.expected fetch.actual
+'
+
test_done
diff --git a/t/t5706-clone-branch.sh b/t/t5706-clone-branch.sh
index f3f9a76056..56be67e07e 100755
--- a/t/t5706-clone-branch.sh
+++ b/t/t5706-clone-branch.sh
@@ -57,12 +57,8 @@ test_expect_success 'clone -b does not munge remotes/origin/HEAD' '
)
'
-test_expect_success 'clone -b with bogus branch chooses HEAD' '
- git clone -b bogus parent clone-bogus &&
- (cd clone-bogus &&
- check_HEAD master &&
- check_file one
- )
+test_expect_success 'clone -b with bogus branch' '
+ test_must_fail git clone -b bogus parent clone-bogus
'
test_done
diff --git a/t/t6012-rev-list-simplify.sh b/t/t6012-rev-list-simplify.sh
index af34a1e817..839ad97b79 100755
--- a/t/t6012-rev-list-simplify.sh
+++ b/t/t6012-rev-list-simplify.sh
@@ -86,5 +86,6 @@ check_result 'I H E C B A' --full-history --date-order -- file
check_result 'I E C B A' --simplify-merges -- file
check_result 'I B A' -- file
check_result 'I B A' --topo-order -- file
+check_result 'H' --first-parent -- another-file
test_done
diff --git a/t/t7004-tag.sh b/t/t7004-tag.sh
index e93ac73829..4ef79aabc4 100755
--- a/t/t7004-tag.sh
+++ b/t/t7004-tag.sh
@@ -586,6 +586,19 @@ test_expect_success \
test_cmp expect actual
'
+test_expect_success 'annotations for blobs are empty' '
+ blob=$(git hash-object -w --stdin <<-\EOF
+ Blob paragraph 1.
+
+ Blob paragraph 2.
+ EOF
+ ) &&
+ git tag tag-blob $blob &&
+ echo "tag-blob " >expect &&
+ git tag -n1 -l tag-blob >actual &&
+ test_cmp expect actual
+'
+
# trying to verify annotated non-signed tags:
test_expect_success GPG \
diff --git a/t/t7406-submodule-update.sh b/t/t7406-submodule-update.sh
index 33b292b8a8..5b97222c48 100755
--- a/t/t7406-submodule-update.sh
+++ b/t/t7406-submodule-update.sh
@@ -611,4 +611,12 @@ test_expect_success 'submodule update places git-dir in superprojects git-dir re
)
'
+test_expect_success 'submodule add properly re-creates deeper level submodules' '
+ (cd super &&
+ git reset --hard master &&
+ rm -rf deeper/ &&
+ git submodule add ../submodule deeper/submodule
+ )
+'
+
test_done
diff --git a/t/t7600-merge.sh b/t/t7600-merge.sh
index 5d8c428543..9e27bbf902 100755
--- a/t/t7600-merge.sh
+++ b/t/t7600-merge.sh
@@ -27,6 +27,7 @@ Testing basic merge operations/option parsing.
'
. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-gpg.sh
printf '%s\n' 1 2 3 4 5 6 7 8 9 >file
printf '%s\n' '1 X' 2 3 4 5 6 7 8 9 >file.1
@@ -670,4 +671,28 @@ test_expect_success 'merge --no-ff --edit' '
test_cmp actual expected
'
+test_expect_success GPG 'merge --ff-only tag' '
+ git reset --hard c0 &&
+ git commit --allow-empty -m "A newer commit" &&
+ git tag -s -m "A newer commit" signed &&
+ git reset --hard c0 &&
+
+ git merge --ff-only signed &&
+ git rev-parse signed^0 >expect &&
+ git rev-parse HEAD >actual &&
+ test_cmp actual expect
+'
+
+test_expect_success GPG 'merge --no-edit tag should skip editor' '
+ git reset --hard c0 &&
+ git commit --allow-empty -m "A newer commit" &&
+ git tag -f -s -m "A newer commit" signed &&
+ git reset --hard c0 &&
+
+ EDITOR=false git merge --no-edit signed &&
+ git rev-parse signed^0 >expect &&
+ git rev-parse HEAD^2 >actual &&
+ test_cmp actual expect
+'
+
test_done
diff --git a/t/t7610-mergetool.sh b/t/t7610-mergetool.sh
index 4aab2a75b8..f5e16fc7db 100755
--- a/t/t7610-mergetool.sh
+++ b/t/t7610-mergetool.sh
@@ -39,6 +39,7 @@ test_expect_success 'setup' '
echo branch1 change >file1 &&
echo branch1 newfile >file2 &&
echo branch1 spaced >"spaced name" &&
+ echo branch1 both added >both &&
echo branch1 change file11 >file11 &&
echo branch1 change file13 >file13 &&
echo branch1 sub >subdir/file3 &&
@@ -50,6 +51,7 @@ test_expect_success 'setup' '
git checkout -b submod-branch1
) &&
git add file1 "spaced name" file11 file13 file2 subdir/file3 submod &&
+ git add both &&
git rm file12 &&
git commit -m "branch1 changes" &&
@@ -58,6 +60,7 @@ test_expect_success 'setup' '
echo master updated >file1 &&
echo master new >file2 &&
echo master updated spaced >"spaced name" &&
+ echo master both added >both &&
echo master updated file12 >file12 &&
echo master updated file14 >file14 &&
echo master new sub >subdir/file3 &&
@@ -69,18 +72,22 @@ test_expect_success 'setup' '
git checkout -b submod-master
) &&
git add file1 "spaced name" file12 file14 file2 subdir/file3 submod &&
+ git add both &&
git rm file11 &&
git commit -m "master updates" &&
git config merge.tool mytool &&
git config mergetool.mytool.cmd "cat \"\$REMOTE\" >\"\$MERGED\"" &&
- git config mergetool.mytool.trustExitCode true
+ git config mergetool.mytool.trustExitCode true &&
+ git config mergetool.mybase.cmd "cat \"\$BASE\" >\"\$MERGED\"" &&
+ git config mergetool.mybase.trustExitCode true
'
test_expect_success 'custom mergetool' '
git checkout -b test1 branch1 &&
git submodule update -N &&
test_must_fail git merge master >/dev/null 2>&1 &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "" | git mergetool file1 file1 ) &&
( yes "" | git mergetool file2 "spaced name" >/dev/null 2>&1 ) &&
( yes "" | git mergetool subdir/file3 >/dev/null 2>&1 ) &&
@@ -101,6 +108,7 @@ test_expect_success 'mergetool crlf' '
( yes "" | git mergetool file1 >/dev/null 2>&1 ) &&
( yes "" | git mergetool file2 >/dev/null 2>&1 ) &&
( yes "" | git mergetool "spaced name" >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "" | git mergetool subdir/file3 >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file12 >/dev/null 2>&1 ) &&
@@ -131,6 +139,7 @@ test_expect_success 'mergetool on file in parent dir' '
cd subdir &&
( yes "" | git mergetool ../file1 >/dev/null 2>&1 ) &&
( yes "" | git mergetool ../file2 ../spaced\ name >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool ../both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool ../file11 >/dev/null 2>&1 ) &&
( yes "d" | git mergetool ../file12 >/dev/null 2>&1 ) &&
( yes "l" | git mergetool ../submod >/dev/null 2>&1 ) &&
@@ -212,6 +221,7 @@ test_expect_success 'deleted vs modified submodule' '
test_must_fail git merge master &&
test -n "$(git ls-files -u)" &&
( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
( yes "r" | git mergetool submod ) &&
rmdir submod && mv submod-movedaside submod &&
@@ -228,6 +238,7 @@ test_expect_success 'deleted vs modified submodule' '
test_must_fail git merge master &&
test -n "$(git ls-files -u)" &&
( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
( yes "l" | git mergetool submod ) &&
test ! -e submod &&
@@ -241,6 +252,7 @@ test_expect_success 'deleted vs modified submodule' '
test_must_fail git merge test6 &&
test -n "$(git ls-files -u)" &&
( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
( yes "r" | git mergetool submod ) &&
test ! -e submod &&
@@ -256,6 +268,7 @@ test_expect_success 'deleted vs modified submodule' '
test_must_fail git merge test6 &&
test -n "$(git ls-files -u)" &&
( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
( yes "l" | git mergetool submod ) &&
test "$(cat submod/bar)" = "master submodule" &&
@@ -279,6 +292,7 @@ test_expect_success 'file vs modified submodule' '
test_must_fail git merge master &&
test -n "$(git ls-files -u)" &&
( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
( yes "r" | git mergetool submod ) &&
rmdir submod && mv submod-movedaside submod &&
@@ -294,6 +308,7 @@ test_expect_success 'file vs modified submodule' '
test_must_fail git merge master &&
test -n "$(git ls-files -u)" &&
( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
( yes "l" | git mergetool submod ) &&
git submodule update -N &&
@@ -309,6 +324,7 @@ test_expect_success 'file vs modified submodule' '
test_must_fail git merge test7 &&
test -n "$(git ls-files -u)" &&
( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
( yes "r" | git mergetool submod ) &&
test -d submod.orig &&
@@ -324,6 +340,7 @@ test_expect_success 'file vs modified submodule' '
test_must_fail git merge test7 &&
test -n "$(git ls-files -u)" &&
( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
( yes "l" | git mergetool submod ) &&
test "$(cat submod/bar)" = "master submodule" &&
@@ -445,4 +462,13 @@ test_expect_success 'directory vs modified submodule' '
git submodule update -N
'
+test_expect_success 'file with no base' '
+ git checkout -b test13 branch1 &&
+ test_must_fail git merge master &&
+ git mergetool --no-prompt --tool mybase -- both &&
+ >expected &&
+ test_cmp both expected &&
+ git reset --hard master >/dev/null 2>&1
+'
+
test_done
diff --git a/t/t7810-grep.sh b/t/t7810-grep.sh
index 7ba5b16f99..75f4716d8c 100755
--- a/t/t7810-grep.sh
+++ b/t/t7810-grep.sh
@@ -246,6 +246,28 @@ do
done
cat >expected <<EOF
+file
+EOF
+test_expect_success 'grep -l -C' '
+ git grep -l -C1 foo >actual &&
+ test_cmp expected actual
+'
+
+cat >expected <<EOF
+file:5
+EOF
+test_expect_success 'grep -c -C' '
+ git grep -c -C1 foo >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'grep -L -C' '
+ git ls-files >expected &&
+ git grep -L -C1 nonexistent_string >actual &&
+ test_cmp expected actual
+'
+
+cat >expected <<EOF
file:foo mmap bar_mmap
EOF
diff --git a/t/t9010-svn-fe.sh b/t/t9010-svn-fe.sh
index 6f6175a8f7..b7eed2489f 100755
--- a/t/t9010-svn-fe.sh
+++ b/t/t9010-svn-fe.sh
@@ -5,8 +5,27 @@ test_description='check svn dumpfile importer'
. ./test-lib.sh
reinit_git () {
+ if ! test_declared_prereq PIPE
+ then
+ echo >&4 "reinit_git: need to declare PIPE prerequisite"
+ return 127
+ fi
rm -fr .git &&
- git init
+ rm -f stream backflow &&
+ git init &&
+ mkfifo stream backflow
+}
+
+try_dump () {
+ input=$1 &&
+ maybe_fail_svnfe=${2:+test_$2} &&
+ maybe_fail_fi=${3:+test_$3} &&
+
+ {
+ $maybe_fail_svnfe test-svn-fe "$input" >stream 3<backflow &
+ } &&
+ $maybe_fail_fi git fast-import --cat-blob-fd=3 <stream 3>backflow &&
+ wait $!
}
properties () {
@@ -35,21 +54,27 @@ text_no_props () {
>empty
-test_expect_success 'empty dump' '
+test_expect_success 'setup: have pipes?' '
+ rm -f frob &&
+ if mkfifo frob
+ then
+ test_set_prereq PIPE
+ fi
+'
+
+test_expect_success PIPE 'empty dump' '
reinit_git &&
echo "SVN-fs-dump-format-version: 2" >input &&
- test-svn-fe input >stream &&
- git fast-import <stream
+ try_dump input
'
-test_expect_success 'v4 dumps not supported' '
+test_expect_success PIPE 'v4 dumps not supported' '
reinit_git &&
echo "SVN-fs-dump-format-version: 4" >v4.dump &&
- test_must_fail test-svn-fe v4.dump >stream &&
- test_cmp empty stream
+ try_dump v4.dump must_fail
'
-test_expect_failure 'empty revision' '
+test_expect_failure PIPE 'empty revision' '
reinit_git &&
printf "rev <nobody, nobody@local>: %s\n" "" "" >expect &&
cat >emptyrev.dump <<-\EOF &&
@@ -64,13 +89,12 @@ test_expect_failure 'empty revision' '
Content-length: 0
EOF
- test-svn-fe emptyrev.dump >stream &&
- git fast-import <stream &&
+ try_dump emptyrev.dump &&
git log -p --format="rev <%an, %ae>: %s" HEAD >actual &&
test_cmp expect actual
'
-test_expect_success 'empty properties' '
+test_expect_success PIPE 'empty properties' '
reinit_git &&
printf "rev <nobody, nobody@local>: %s\n" "" "" >expect &&
cat >emptyprop.dump <<-\EOF &&
@@ -88,13 +112,12 @@ test_expect_success 'empty properties' '
PROPS-END
EOF
- test-svn-fe emptyprop.dump >stream &&
- git fast-import <stream &&
+ try_dump emptyprop.dump &&
git log -p --format="rev <%an, %ae>: %s" HEAD >actual &&
test_cmp expect actual
'
-test_expect_success 'author name and commit message' '
+test_expect_success PIPE 'author name and commit message' '
reinit_git &&
echo "<author@example.com, author@example.com@local>" >expect.author &&
cat >message <<-\EOF &&
@@ -121,15 +144,14 @@ test_expect_success 'author name and commit message' '
echo &&
cat props
} >log.dump &&
- test-svn-fe log.dump >stream &&
- git fast-import <stream &&
+ try_dump log.dump &&
git log -p --format="%B" HEAD >actual.log &&
git log --format="<%an, %ae>" >actual.author &&
test_cmp message actual.log &&
test_cmp expect.author actual.author
'
-test_expect_success 'unsupported properties are ignored' '
+test_expect_success PIPE 'unsupported properties are ignored' '
reinit_git &&
echo author >expect &&
cat >extraprop.dump <<-\EOF &&
@@ -149,13 +171,12 @@ test_expect_success 'unsupported properties are ignored' '
author
PROPS-END
EOF
- test-svn-fe extraprop.dump >stream &&
- git fast-import <stream &&
+ try_dump extraprop.dump &&
git log -p --format=%an HEAD >actual &&
test_cmp expect actual
'
-test_expect_failure 'timestamp and empty file' '
+test_expect_failure PIPE 'timestamp and empty file' '
echo author@example.com >expect.author &&
echo 1999-01-01 >expect.date &&
echo file >expect.files &&
@@ -186,8 +207,7 @@ test_expect_failure 'timestamp and empty file' '
EOF
} >emptyfile.dump &&
- test-svn-fe emptyfile.dump >stream &&
- git fast-import <stream &&
+ try_dump emptyfile.dump &&
git log --format=%an HEAD >actual.author &&
git log --date=short --format=%ad HEAD >actual.date &&
git ls-tree -r --name-only HEAD >actual.files &&
@@ -198,7 +218,7 @@ test_expect_failure 'timestamp and empty file' '
test_cmp empty file
'
-test_expect_success 'directory with files' '
+test_expect_success PIPE 'directory with files' '
reinit_git &&
printf "%s\n" directory/file1 directory/file2 >expect.files &&
echo hi >hi &&
@@ -242,8 +262,7 @@ test_expect_success 'directory with files' '
EOF
text_no_props hi
} >directory.dump &&
- test-svn-fe directory.dump >stream &&
- git fast-import <stream &&
+ try_dump directory.dump &&
git ls-tree -r --name-only HEAD >actual.files &&
git checkout HEAD directory &&
@@ -252,7 +271,107 @@ test_expect_success 'directory with files' '
test_cmp hi directory/file2
'
-test_expect_success 'node without action' '
+test_expect_success PIPE 'branch name with backslash' '
+ reinit_git &&
+ sort <<-\EOF >expect.branch-files &&
+ trunk/file1
+ trunk/file2
+ "branches/UpdateFOPto094\\/file1"
+ "branches/UpdateFOPto094\\/file2"
+ EOF
+
+ echo hi >hi &&
+ echo hello >hello &&
+ {
+ properties \
+ svn:author author@example.com \
+ svn:date "1999-02-02T00:01:02.000000Z" \
+ svn:log "add directory with some files in it" &&
+ echo PROPS-END
+ } >props.setup &&
+ {
+ properties \
+ svn:author brancher@example.com \
+ svn:date "2007-12-06T21:38:34.000000Z" \
+ svn:log "Updating fop to .94 and adjust fo-stylesheets" &&
+ echo PROPS-END
+ } >props.branch &&
+ {
+ cat <<-EOF &&
+ SVN-fs-dump-format-version: 3
+
+ Revision-number: 1
+ EOF
+ echo Prop-content-length: $(wc -c <props.setup) &&
+ echo Content-length: $(wc -c <props.setup) &&
+ echo &&
+ cat props.setup &&
+ cat <<-\EOF &&
+
+ Node-path: trunk
+ Node-kind: dir
+ Node-action: add
+ Prop-content-length: 10
+ Content-length: 10
+
+ PROPS-END
+
+ Node-path: branches
+ Node-kind: dir
+ Node-action: add
+ Prop-content-length: 10
+ Content-length: 10
+
+ PROPS-END
+
+ Node-path: trunk/file1
+ Node-kind: file
+ Node-action: add
+ EOF
+ text_no_props hello &&
+ cat <<-\EOF &&
+ Node-path: trunk/file2
+ Node-kind: file
+ Node-action: add
+ EOF
+ text_no_props hi &&
+ cat <<-\EOF &&
+
+ Revision-number: 2
+ EOF
+ echo Prop-content-length: $(wc -c <props.branch) &&
+ echo Content-length: $(wc -c <props.branch) &&
+ echo &&
+ cat props.branch &&
+ cat <<-\EOF
+
+ Node-path: branches/UpdateFOPto094\
+ Node-kind: dir
+ Node-action: add
+ Node-copyfrom-rev: 1
+ Node-copyfrom-path: trunk
+
+ Node-kind: dir
+ Node-action: add
+ Prop-content-length: 34
+ Content-length: 34
+
+ K 13
+ svn:mergeinfo
+ V 0
+
+ PROPS-END
+ EOF
+ } >branch.dump &&
+ try_dump branch.dump &&
+
+ git ls-tree -r --name-only HEAD |
+ sort >actual.branch-files &&
+ test_cmp expect.branch-files actual.branch-files
+'
+
+test_expect_success PIPE 'node without action' '
+ reinit_git &&
cat >inaction.dump <<-\EOF &&
SVN-fs-dump-format-version: 3
@@ -269,10 +388,11 @@ test_expect_success 'node without action' '
PROPS-END
EOF
- test_must_fail test-svn-fe inaction.dump
+ try_dump inaction.dump must_fail
'
-test_expect_success 'action: add node without text' '
+test_expect_success PIPE 'action: add node without text' '
+ reinit_git &&
cat >textless.dump <<-\EOF &&
SVN-fs-dump-format-version: 3
@@ -290,10 +410,10 @@ test_expect_success 'action: add node without text' '
PROPS-END
EOF
- test_must_fail test-svn-fe textless.dump
+ try_dump textless.dump must_fail
'
-test_expect_failure 'change file mode but keep old content' '
+test_expect_failure PIPE 'change file mode but keep old content' '
reinit_git &&
cat >expect <<-\EOF &&
OBJID
@@ -356,8 +476,7 @@ test_expect_failure 'change file mode but keep old content' '
PROPS-END
EOF
- test-svn-fe filemode.dump >stream &&
- git fast-import <stream &&
+ try_dump filemode.dump &&
{
git rev-list HEAD |
git diff-tree --root --stdin |
@@ -370,7 +489,7 @@ test_expect_failure 'change file mode but keep old content' '
test_cmp hello actual.target
'
-test_expect_success 'NUL in property value' '
+test_expect_success PIPE 'NUL in property value' '
reinit_git &&
echo "commit message" >expect.message &&
{
@@ -391,13 +510,12 @@ test_expect_success 'NUL in property value' '
echo &&
cat props
} >nulprop.dump &&
- test-svn-fe nulprop.dump >stream &&
- git fast-import <stream &&
+ try_dump nulprop.dump &&
git diff-tree --always -s --format=%s HEAD >actual.message &&
test_cmp expect.message actual.message
'
-test_expect_success 'NUL in log message, file content, and property name' '
+test_expect_success PIPE 'NUL in log message, file content, and property name' '
# Caveat: svnadmin 1.6.16 (r1073529) truncates at \0 in the
# svn:specialQnotreally example.
reinit_git &&
@@ -458,8 +576,7 @@ test_expect_success 'NUL in log message, file content, and property name' '
link hello
EOF
} >8bitclean.dump &&
- test-svn-fe 8bitclean.dump >stream &&
- git fast-import <stream &&
+ try_dump 8bitclean.dump &&
{
git rev-list HEAD |
git diff-tree --root --stdin |
@@ -478,7 +595,7 @@ test_expect_success 'NUL in log message, file content, and property name' '
test_cmp expect.hello2 actual.hello2
'
-test_expect_success 'change file mode and reiterate content' '
+test_expect_success PIPE 'change file mode and reiterate content' '
reinit_git &&
cat >expect <<-\EOF &&
OBJID
@@ -490,7 +607,7 @@ test_expect_success 'change file mode and reiterate content' '
EOF
echo "link hello" >expect.blob &&
echo hello >hello &&
- cat >filemode.dump <<-\EOF &&
+ cat >filemode2.dump <<-\EOF &&
SVN-fs-dump-format-version: 3
Revision-number: 1
@@ -545,8 +662,7 @@ test_expect_success 'change file mode and reiterate content' '
PROPS-END
link hello
EOF
- test-svn-fe filemode.dump >stream &&
- git fast-import <stream &&
+ try_dump filemode2.dump &&
{
git rev-list HEAD |
git diff-tree --root --stdin |
@@ -559,7 +675,8 @@ test_expect_success 'change file mode and reiterate content' '
test_cmp hello actual.target
'
-test_expect_success 'deltas not supported' '
+test_expect_success PIPE 'deltas supported' '
+ reinit_git &&
{
# (old) h + (inline) ello + (old) \n
printf "SVNQ%b%b%s" "Q\003\006\005\004" "\001Q\0204\001\002" "ello" |
@@ -619,10 +736,10 @@ test_expect_success 'deltas not supported' '
echo PROPS-END &&
cat delta
} >delta.dump &&
- test_must_fail test-svn-fe delta.dump
+ try_dump delta.dump
'
-test_expect_success 'property deltas supported' '
+test_expect_success PIPE 'property deltas supported' '
reinit_git &&
cat >expect <<-\EOF &&
OBJID
@@ -678,8 +795,7 @@ test_expect_success 'property deltas supported' '
PROPS-END
EOF
} >propdelta.dump &&
- test-svn-fe propdelta.dump >stream &&
- git fast-import <stream &&
+ try_dump propdelta.dump &&
{
git rev-list HEAD |
git diff-tree --stdin |
@@ -688,7 +804,7 @@ test_expect_success 'property deltas supported' '
test_cmp expect actual
'
-test_expect_success 'properties on /' '
+test_expect_success PIPE 'properties on /' '
reinit_git &&
cat <<-\EOF >expect &&
OBJID
@@ -733,8 +849,7 @@ test_expect_success 'properties on /' '
PROPS-END
EOF
- test-svn-fe changeroot.dump >stream &&
- git fast-import <stream &&
+ try_dump changeroot.dump &&
{
git rev-list HEAD |
git diff-tree --root --always --stdin |
@@ -743,7 +858,7 @@ test_expect_success 'properties on /' '
test_cmp expect actual
'
-test_expect_success 'deltas for typechange' '
+test_expect_success PIPE 'deltas for typechange' '
reinit_git &&
cat >expect <<-\EOF &&
OBJID
@@ -819,8 +934,7 @@ test_expect_success 'deltas for typechange' '
PROPS-END
link testing 321
EOF
- test-svn-fe deleteprop.dump >stream &&
- git fast-import <stream &&
+ try_dump deleteprop.dump &&
{
git rev-list HEAD |
git diff-tree --root --stdin |
@@ -829,6 +943,143 @@ test_expect_success 'deltas for typechange' '
test_cmp expect actual
'
+test_expect_success PIPE 'deltas need not consume the whole preimage' '
+ reinit_git &&
+ cat >expect <<-\EOF &&
+ OBJID
+ :120000 100644 OBJID OBJID T postimage
+ OBJID
+ :100644 120000 OBJID OBJID T postimage
+ OBJID
+ :000000 100644 OBJID OBJID A postimage
+ EOF
+ echo "first preimage" >expect.1 &&
+ printf target >expect.2 &&
+ printf lnk >expect.3 &&
+ {
+ printf "SVNQ%b%b%b" "QQ\017\001\017" "\0217" "first preimage\n" |
+ q_to_nul
+ } >delta.1 &&
+ {
+ properties svn:special "*" &&
+ echo PROPS-END
+ } >symlink.props &&
+ {
+ printf "SVNQ%b%b%b" "Q\002\013\004\012" "\0201\001\001\0211" "lnk target" |
+ q_to_nul
+ } >delta.2 &&
+ {
+ printf "SVNQ%b%b" "Q\004\003\004Q" "\001Q\002\002" |
+ q_to_nul
+ } >delta.3 &&
+ {
+ cat <<-\EOF &&
+ SVN-fs-dump-format-version: 3
+
+ Revision-number: 1
+ Prop-content-length: 10
+ Content-length: 10
+
+ PROPS-END
+
+ Node-path: postimage
+ Node-kind: file
+ Node-action: add
+ Text-delta: true
+ Prop-content-length: 10
+ EOF
+ echo Text-content-length: $(wc -c <delta.1) &&
+ echo Content-length: $((10 + $(wc -c <delta.1))) &&
+ echo &&
+ echo PROPS-END &&
+ cat delta.1 &&
+ cat <<-\EOF &&
+
+ Revision-number: 2
+ Prop-content-length: 10
+ Content-length: 10
+
+ PROPS-END
+
+ Node-path: postimage
+ Node-kind: file
+ Node-action: change
+ Text-delta: true
+ EOF
+ echo Prop-content-length: $(wc -c <symlink.props) &&
+ echo Text-content-length: $(wc -c <delta.2) &&
+ echo Content-length: $(($(wc -c <symlink.props) + $(wc -c <delta.2))) &&
+ echo &&
+ cat symlink.props &&
+ cat delta.2 &&
+ cat <<-\EOF &&
+
+ Revision-number: 3
+ Prop-content-length: 10
+ Content-length: 10
+
+ PROPS-END
+
+ Node-path: postimage
+ Node-kind: file
+ Node-action: change
+ Text-delta: true
+ Prop-content-length: 10
+ EOF
+ echo Text-content-length: $(wc -c <delta.3) &&
+ echo Content-length: $((10 + $(wc -c <delta.3))) &&
+ echo &&
+ echo PROPS-END &&
+ cat delta.3 &&
+ echo
+ } >deltapartial.dump &&
+ try_dump deltapartial.dump &&
+ {
+ git rev-list HEAD |
+ git diff-tree --root --stdin |
+ sed "s/$_x40/OBJID/g"
+ } >actual &&
+ test_cmp expect actual &&
+ git show HEAD:postimage >actual.3 &&
+ git show HEAD^:postimage >actual.2 &&
+ git show HEAD^^:postimage >actual.1 &&
+ test_cmp expect.1 actual.1 &&
+ test_cmp expect.2 actual.2 &&
+ test_cmp expect.3 actual.3
+'
+
+test_expect_success PIPE 'no hang for delta trying to read past end of preimage' '
+ reinit_git &&
+ {
+ # COPY 1
+ printf "SVNQ%b%b" "Q\001\001\002Q" "\001Q" |
+ q_to_nul
+ } >greedy.delta &&
+ {
+ cat <<-\EOF &&
+ SVN-fs-dump-format-version: 3
+
+ Revision-number: 1
+ Prop-content-length: 10
+ Content-length: 10
+
+ PROPS-END
+
+ Node-path: bootstrap
+ Node-kind: file
+ Node-action: add
+ Text-delta: true
+ Prop-content-length: 10
+ EOF
+ echo Text-content-length: $(wc -c <greedy.delta) &&
+ echo Content-length: $((10 + $(wc -c <greedy.delta))) &&
+ echo &&
+ echo PROPS-END &&
+ cat greedy.delta &&
+ echo
+ } >greedydelta.dump &&
+ try_dump greedydelta.dump must_fail might_fail
+'
test_expect_success 'set up svn repo' '
svnconf=$PWD/svnconf &&
@@ -844,12 +1095,12 @@ test_expect_success 'set up svn repo' '
fi
'
-test_expect_success SVNREPO 't9135/svn.dump' '
- git init simple-git &&
- test-svn-fe "$TEST_DIRECTORY/t9135/svn.dump" >simple.fe &&
+test_expect_success SVNREPO,PIPE 't9135/svn.dump' '
+ mkdir -p simple-git &&
(
cd simple-git &&
- git fast-import <../simple.fe
+ reinit_git &&
+ try_dump "$TEST_DIRECTORY/t9135/svn.dump"
) &&
(
cd simple-svnco &&
diff --git a/t/t9011-svn-da.sh b/t/t9011-svn-da.sh
new file mode 100755
index 0000000000..b38d16f9db
--- /dev/null
+++ b/t/t9011-svn-da.sh
@@ -0,0 +1,248 @@
+#!/bin/sh
+
+test_description='test parsing of svndiff0 files
+
+Using the "test-svn-fe -d" helper, check that svn-fe correctly
+interprets deltas using various facilities (some from the spec,
+some only learned from practice).
+'
+. ./test-lib.sh
+
+>empty
+printf foo >preimage
+
+test_expect_success 'reject empty delta' '
+ test_must_fail test-svn-fe -d preimage empty 0
+'
+
+test_expect_success 'delta can empty file' '
+ printf "SVNQ" | q_to_nul >clear.delta &&
+ test-svn-fe -d preimage clear.delta 4 >actual &&
+ test_cmp empty actual
+'
+
+test_expect_success 'reject svndiff2' '
+ printf "SVN\002" >bad.filetype &&
+ test_must_fail test-svn-fe -d preimage bad.filetype 4
+'
+
+test_expect_success 'one-window empty delta' '
+ printf "SVNQ%s" "QQQQQ" | q_to_nul >clear.onewindow &&
+ test-svn-fe -d preimage clear.onewindow 9 >actual &&
+ test_cmp empty actual
+'
+
+test_expect_success 'reject incomplete window header' '
+ printf "SVNQ%s" "QQQQQ" | q_to_nul >clear.onewindow &&
+ printf "SVNQ%s" "QQ" | q_to_nul >clear.partialwindow &&
+ test_must_fail test-svn-fe -d preimage clear.onewindow 6 &&
+ test_must_fail test-svn-fe -d preimage clear.partialwindow 6
+'
+
+test_expect_success 'reject declared delta longer than actual delta' '
+ printf "SVNQ%s" "QQQQQ" | q_to_nul >clear.onewindow &&
+ printf "SVNQ%s" "QQ" | q_to_nul >clear.partialwindow &&
+ test_must_fail test-svn-fe -d preimage clear.onewindow 14 &&
+ test_must_fail test-svn-fe -d preimage clear.partialwindow 9
+'
+
+test_expect_success 'two-window empty delta' '
+ printf "SVNQ%s%s" "QQQQQ" "QQQQQ" | q_to_nul >clear.twowindow &&
+ test-svn-fe -d preimage clear.twowindow 14 >actual &&
+ test_must_fail test-svn-fe -d preimage clear.twowindow 13 &&
+ test_cmp empty actual
+'
+
+test_expect_success 'noisy zeroes' '
+ printf "SVNQ%s" \
+ "RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRQQQQQ" |
+ tr R "\200" |
+ q_to_nul >clear.noisy &&
+ len=$(wc -c <clear.noisy) &&
+ test-svn-fe -d preimage clear.noisy $len >actual &&
+ test_cmp empty actual
+'
+
+test_expect_success 'reject variable-length int in magic' '
+ printf "SVNRQ" | tr R "\200" | q_to_nul >clear.badmagic &&
+ test_must_fail test-svn-fe -d preimage clear.badmagic 5
+'
+
+test_expect_success 'reject truncated integer' '
+ printf "SVNQ%s%s" "QQQQQ" "QQQQRRQ" |
+ tr R "\200" |
+ q_to_nul >clear.fullint &&
+ printf "SVNQ%s%s" "QQQQQ" "QQQQRR" |
+ tr RT "\201" |
+ q_to_nul >clear.partialint &&
+ test_must_fail test-svn-fe -d preimage clear.fullint 15 &&
+ test-svn-fe -d preimage clear.fullint 16 &&
+ test_must_fail test-svn-fe -d preimage clear.partialint 15
+'
+
+test_expect_success 'nonempty (but unused) preimage view' '
+ printf "SVNQ%b" "Q\003QQQ" | q_to_nul >clear.readpreimage &&
+ test-svn-fe -d preimage clear.readpreimage 9 >actual &&
+ test_cmp empty actual
+'
+
+test_expect_success 'preimage view: right endpoint cannot backtrack' '
+ printf "SVNQ%b%b" "Q\003QQQ" "Q\002QQQ" |
+ q_to_nul >clear.backtrack &&
+ test_must_fail test-svn-fe -d preimage clear.backtrack 14
+'
+
+test_expect_success 'preimage view: left endpoint can advance' '
+ printf "SVNQ%b%b" "Q\003QQQ" "\001\002QQQ" |
+ q_to_nul >clear.preshrink &&
+ printf "SVNQ%b%b" "Q\003QQQ" "\001\001QQQ" |
+ q_to_nul >clear.shrinkbacktrack &&
+ test-svn-fe -d preimage clear.preshrink 14 >actual &&
+ test_must_fail test-svn-fe -d preimage clear.shrinkbacktrack 14 &&
+ test_cmp empty actual
+'
+
+test_expect_success 'preimage view: offsets compared by value' '
+ printf "SVNQ%b%b" "\001\001QQQ" "\0200Q\003QQQ" |
+ q_to_nul >clear.noisybacktrack &&
+ printf "SVNQ%b%b" "\001\001QQQ" "\0200\001\002QQQ" |
+ q_to_nul >clear.noisyadvance &&
+ test_must_fail test-svn-fe -d preimage clear.noisybacktrack 15 &&
+ test-svn-fe -d preimage clear.noisyadvance 15 >actual &&
+ test_cmp empty actual
+'
+
+test_expect_success 'preimage view: reject truncated preimage' '
+ printf "SVNQ%b" "\010QQQQ" | q_to_nul >clear.lateemptyread &&
+ printf "SVNQ%b" "\010\001QQQ" | q_to_nul >clear.latenonemptyread &&
+ printf "SVNQ%b" "\001\010QQQ" | q_to_nul >clear.longread &&
+ test_must_fail test-svn-fe -d preimage clear.lateemptyread 9 &&
+ test_must_fail test-svn-fe -d preimage clear.latenonemptyread 9 &&
+ test_must_fail test-svn-fe -d preimage clear.longread 9
+'
+
+test_expect_success 'forbid unconsumed inline data' '
+ printf "SVNQ%b%s%b%s" "QQQQ\003" "bar" "QQQQ\001" "x" |
+ q_to_nul >inline.clear &&
+ test_must_fail test-svn-fe -d preimage inline.clear 18 >actual
+'
+
+test_expect_success 'reject truncated inline data' '
+ printf "SVNQ%b%s" "QQQQ\003" "b" | q_to_nul >inline.trunc &&
+ test_must_fail test-svn-fe -d preimage inline.trunc 10
+'
+
+test_expect_success 'reject truncated inline data (after instruction section)' '
+ printf "SVNQ%b%b%s" "QQ\001\001\003" "\0201" "b" | q_to_nul >insn.trunc &&
+ test_must_fail test-svn-fe -d preimage insn.trunc 11
+'
+
+test_expect_success 'copyfrom_data' '
+ echo hi >expect &&
+ printf "SVNQ%b%b%b" "QQ\003\001\003" "\0203" "hi\n" | q_to_nul >copydat &&
+ test-svn-fe -d preimage copydat 13 >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'multiple copyfrom_data' '
+ echo hi >expect &&
+ printf "SVNQ%b%b%b%b%b" "QQ\003\002\003" "\0201\0202" "hi\n" \
+ "QQQ\002Q" "\0200Q" | q_to_nul >copy.multi &&
+ len=$(wc -c <copy.multi) &&
+ test-svn-fe -d preimage copy.multi $len >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'incomplete multiple insn' '
+ printf "SVNQ%b%b%b" "QQ\003\002\003" "\0203\0200" "hi\n" |
+ q_to_nul >copy.partial &&
+ len=$(wc -c <copy.partial) &&
+ test_must_fail test-svn-fe -d preimage copy.partial $len
+'
+
+test_expect_success 'catch attempt to copy missing data' '
+ printf "SVNQ%b%b%s%b%s" "QQ\002\002\001" "\0201\0201" "X" \
+ "QQQQ\002" "YZ" |
+ q_to_nul >copy.incomplete &&
+ len=$(wc -c <copy.incomplete) &&
+ test_must_fail test-svn-fe -d preimage copy.incomplete $len
+'
+
+test_expect_success 'copyfrom target to repeat data' '
+ printf foofoo >expect &&
+ printf "SVNQ%b%b%s" "QQ\006\004\003" "\0203\0100\003Q" "foo" |
+ q_to_nul >copytarget.repeat &&
+ len=$(wc -c <copytarget.repeat) &&
+ test-svn-fe -d preimage copytarget.repeat $len >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'copyfrom target out of order' '
+ printf foooof >expect &&
+ printf "SVNQ%b%b%s" \
+ "QQ\006\007\003" "\0203\0101\002\0101\001\0101Q" "foo" |
+ q_to_nul >copytarget.reverse &&
+ len=$(wc -c <copytarget.reverse) &&
+ test-svn-fe -d preimage copytarget.reverse $len >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'catch copyfrom future' '
+ printf "SVNQ%b%b%s" "QQ\004\004\003" "\0202\0101\002\0201" "XYZ" |
+ q_to_nul >copytarget.infuture &&
+ len=$(wc -c <copytarget.infuture) &&
+ test_must_fail test-svn-fe -d preimage copytarget.infuture $len
+'
+
+test_expect_success 'copy to sustain' '
+ printf XYXYXYXYXYXZ >expect &&
+ printf "SVNQ%b%b%s" "QQ\014\004\003" "\0202\0111Q\0201" "XYZ" |
+ q_to_nul >copytarget.sustain &&
+ len=$(wc -c <copytarget.sustain) &&
+ test-svn-fe -d preimage copytarget.sustain $len >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'catch copy that overflows' '
+ printf "SVNQ%b%b%s" "QQ\003\003\001" "\0201\0177Q" X |
+ q_to_nul >copytarget.overflow &&
+ len=$(wc -c <copytarget.overflow) &&
+ test_must_fail test-svn-fe -d preimage copytarget.overflow $len
+'
+
+test_expect_success 'copyfrom source' '
+ printf foo >expect &&
+ printf "SVNQ%b%b" "Q\003\003\002Q" "\003Q" | q_to_nul >copysource.all &&
+ test-svn-fe -d preimage copysource.all 11 >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'copy backwards' '
+ printf oof >expect &&
+ printf "SVNQ%b%b" "Q\003\003\006Q" "\001\002\001\001\001Q" |
+ q_to_nul >copysource.rev &&
+ test-svn-fe -d preimage copysource.rev 15 >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'offsets are relative to window' '
+ printf fo >expect &&
+ printf "SVNQ%b%b%b%b" "Q\003\001\002Q" "\001Q" \
+ "\002\001\001\002Q" "\001Q" |
+ q_to_nul >copysource.two &&
+ test-svn-fe -d preimage copysource.two 18 >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'example from notes/svndiff' '
+ printf aaaaccccdddddddd >expect &&
+ printf aaaabbbbcccc >source &&
+ printf "SVNQ%b%b%s" "Q\014\020\007\001" \
+ "\004Q\004\010\0201\0107\010" d |
+ q_to_nul >delta.example &&
+ len=$(wc -c <delta.example) &&
+ test-svn-fe -d source delta.example $len >actual &&
+ test_cmp expect actual
+'
+
+test_done
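
As a reading aid for the delta constructions above, the 13-byte input built in the 'copyfrom_data' test decodes as follows (a sketch based on the svndiff0 window layout; Q stands for NUL as produced by q_to_nul, and the escapes are octal as printf %b interprets them):

# "SVNQ"               magic: 'S' 'V' 'N' plus version byte 0
# Q Q \003 \001 \003   window header: source view offset 0, source view
#                      length 0, target view length 3, instruction-section
#                      length 1, new-data length 3
# \0203 (0x83)         single instruction: high bits 10 = copy from new
#                      data, low six bits = length 3
# "hi\n"               the three bytes of new data, copied verbatim
#
# 4 + 5 + 1 + 3 = 13 bytes, the length handed to "test-svn-fe -d", and the
# reconstructed output is "hi\n", matching the expected file.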
diff --git a/t/t9500-gitweb-standalone-no-errors.sh b/t/t9500-gitweb-standalone-no-errors.sh
index ab24917812..0f771c673d 100755
--- a/t/t9500-gitweb-standalone-no-errors.sh
+++ b/t/t9500-gitweb-standalone-no-errors.sh
@@ -475,6 +475,14 @@ test_expect_success \
'gitweb_run "" "/.git/master:foo/"'
test_expect_success \
+ 'path_info: project/branch (non-existent)' \
+ 'gitweb_run "" "/.git/non-existent"'
+
+test_expect_success \
+ 'path_info: project/branch:filename (non-existent branch)' \
+ 'gitweb_run "" "/.git/non-existent:non-existent"'
+
+test_expect_success \
'path_info: project/branch:file (non-existent)' \
'gitweb_run "" "/.git/master:non-existent"'
diff --git a/t/t9801-git-p4-branch.sh b/t/t9801-git-p4-branch.sh
index a25f18d36a..d414705416 100755
--- a/t/t9801-git-p4-branch.sh
+++ b/t/t9801-git-p4-branch.sh
@@ -172,9 +172,9 @@ test_expect_success 'add simple p4 branches' '
echo file1 >file1 &&
echo file2 >file2 &&
p4 add file1 file2 &&
- p4 submit -d "branch1" &&
+ p4 submit -d "Create branch1" &&
p4 integrate //depot/branch1/... //depot/branch2/... &&
- p4 submit -d "branch2" &&
+ p4 submit -d "Integrate branch2 from branch1" &&
echo file3 >file3 &&
p4 add file3 &&
p4 submit -d "add file3 in branch1" &&
@@ -182,7 +182,7 @@ test_expect_success 'add simple p4 branches' '
echo update >>file2 &&
p4 submit -d "update file2 in branch1" &&
p4 integrate //depot/branch1/... //depot/branch3/... &&
- p4 submit -d "branch3"
+ p4 submit -d "Integrate branch3 from branch1"
)
'
@@ -203,17 +203,17 @@ test_expect_success 'git-p4 clone simple branches' '
test -f file1 &&
test -f file2 &&
test -f file3 &&
- grep -q update file2 &&
+ grep update file2 &&
git reset --hard p4/depot/branch2 &&
test -f file1 &&
test -f file2 &&
test ! -f file3 &&
- test_must_fail grep -q update file2 &&
+ ! grep update file2 &&
git reset --hard p4/depot/branch3 &&
test -f file1 &&
test -f file2 &&
test -f file3 &&
- grep -q update file2 &&
+ grep update file2 &&
cd "$cli" &&
cd branch1 &&
p4 edit file2 &&
@@ -222,7 +222,87 @@ test_expect_success 'git-p4 clone simple branches' '
cd "$git" &&
git reset --hard p4/depot/branch1 &&
"$GITP4" rebase &&
- grep -q file2_ file2
+ grep file2_ file2
+ )
+'
+
+# Create a complex branch structure in the P4 depot to check that the branches
+# are cloned correctly. The branches are created from older changelists to
+# check that git-p4 is able to detect them correctly.
+# The final expected structure is:
+# `branch1
+# | `- file1
+# | `- file2 (updated)
+# | `- file3
+# `branch2
+# | `- file1
+# | `- file2
+# `branch3
+# | `- file1
+# | `- file2 (updated)
+# | `- file3
+# `branch4
+# | `- file1
+# | `- file2
+# `branch5
+# `- file1
+# `- file2
+# `- file3
+test_expect_success 'git-p4 add complex branches' '
+ test_when_finished cleanup_git &&
+ test_create_repo "$git" &&
+ (
+ cd "$cli" &&
+ changelist=$(p4 changes -m1 //depot/... | cut -d" " -f2) &&
+ changelist=$(($changelist - 5)) &&
+ p4 integrate //depot/branch1/...@$changelist //depot/branch4/... &&
+ p4 submit -d "Integrate branch4 from branch1@${changelist}" &&
+ changelist=$(($changelist + 2)) &&
+ p4 integrate //depot/branch1/...@$changelist //depot/branch5/... &&
+ p4 submit -d "Integrate branch5 from branch1@${changelist}"
+ )
+'
+
+# Configure branches through git-config and clone them. git-p4 will only be able
+# to clone the original structure if it is able to detect the origin changelist
+# of each branch.
+test_expect_success 'git-p4 clone complex branches' '
+ test_when_finished cleanup_git &&
+ test_create_repo "$git" &&
+ (
+ cd "$git" &&
+ git config git-p4.branchList branch1:branch2 &&
+ git config --add git-p4.branchList branch1:branch3 &&
+ git config --add git-p4.branchList branch1:branch4 &&
+ git config --add git-p4.branchList branch1:branch5 &&
+ "$GITP4" clone --dest=. --detect-branches //depot@all &&
+ git log --all --graph --decorate --stat &&
+ git reset --hard p4/depot/branch1 &&
+ test_path_is_file file1 &&
+ test_path_is_file file2 &&
+ test_path_is_file file3 &&
+ grep update file2 &&
+ git reset --hard p4/depot/branch2 &&
+ test_path_is_file file1 &&
+ test_path_is_file file2 &&
+ test_path_is_missing file3 &&
+ ! grep update file2 &&
+ git reset --hard p4/depot/branch3 &&
+ test_path_is_file file1 &&
+ test_path_is_file file2 &&
+ test_path_is_file file3 &&
+ grep update file2 &&
+ git reset --hard p4/depot/branch4 &&
+ test_path_is_file file1 &&
+ test_path_is_file file2 &&
+ test_path_is_missing file3 &&
+ ! grep update file2 &&
+ git reset --hard p4/depot/branch5 &&
+ test_path_is_file file1 &&
+ test_path_is_file file2 &&
+ test_path_is_file file3 &&
+ ! grep update file2 &&
+ test_path_is_missing .git/git-p4-tmp
)
'
diff --git a/t/t9803-git-p4-shell-metachars.sh b/t/t9803-git-p4-shell-metachars.sh
index db04375a13..db670207bd 100755
--- a/t/t9803-git-p4-shell-metachars.sh
+++ b/t/t9803-git-p4-shell-metachars.sh
@@ -57,6 +57,54 @@ test_expect_success 'deleting with shell metachars' '
)
'
+# Create a branch with a shell metachar in its name
+#
+# 1. //depot/main
+# 2. //depot/branch$3
+
+test_expect_success 'branch with shell char' '
+ test_when_finished cleanup_git &&
+ test_create_repo "$git" &&
+ (
+ cd "$cli" &&
+
+ mkdir -p main &&
+
+ echo f1 >main/f1 &&
+ p4 add main/f1 &&
+ p4 submit -d "main/f1" &&
+
+ p4 integrate //depot/main/... //depot/branch\$3/... &&
+ p4 submit -d "integrate main to branch\$3" &&
+
+ echo f1 >branch\$3/shell_char_branch_file &&
+ p4 add branch\$3/shell_char_branch_file &&
+ p4 submit -d "branch\$3/shell_char_branch_file" &&
+
+ p4 branch -i <<-EOF &&
+ Branch: branch\$3
+ View: //depot/main/... //depot/branch\$3/...
+ EOF
+
+ p4 edit main/f1 &&
+ echo "a change" >> main/f1 &&
+ p4 submit -d "a change" main/f1 &&
+
+ p4 integrate -b branch\$3 &&
+ p4 resolve -am branch\$3/... &&
+ p4 submit -d "integrate main to branch\$3" &&
+
+ cd "$git" &&
+
+ git config git-p4.branchList main:branch\$3 &&
+ "$GITP4" clone --dest=. --detect-branches //depot@all &&
+ git log --all --graph --decorate --stat &&
+ git reset --hard p4/depot/branch\$3 &&
+ test -f shell_char_branch_file &&
+ test -f f1
+ )
+'
+
test_expect_success 'kill p4d' '
kill_p4d
'
diff --git a/t/t9804-git-p4-label.sh b/t/t9804-git-p4-label.sh
new file mode 100755
index 0000000000..80d01ea438
--- /dev/null
+++ b/t/t9804-git-p4-label.sh
@@ -0,0 +1,113 @@
+test_description='git-p4 p4 label tests'
+
+. ./lib-git-p4.sh
+
+test_expect_success 'start p4d' '
+ start_p4d
+'
+
+# Basic p4 label tests.
+#
+# Note: can't have more than one label per commit - others
+# are silently discarded.
+#
+test_expect_success 'basic p4 labels' '
+ test_when_finished cleanup_git &&
+ (
+ cd "$cli" &&
+ mkdir -p main &&
+
+ echo f1 >main/f1 &&
+ p4 add main/f1 &&
+ p4 submit -d "main/f1" &&
+
+ echo f2 >main/f2 &&
+ p4 add main/f2 &&
+ p4 submit -d "main/f2" &&
+
+ echo f3 >main/file_with_\$metachar &&
+ p4 add main/file_with_\$metachar &&
+ p4 submit -d "file with metachar" &&
+
+ p4 tag -l tag_f1_only main/f1 &&
+ p4 tag -l tag_with\$_shell_char main/... &&
+
+ echo f4 >main/f4 &&
+ p4 add main/f4 &&
+ p4 submit -d "main/f4" &&
+
+ p4 label -i <<-EOF &&
+ Label: long_label
+ Description:
+ A Label first line
+ A Label second line
+ View: //depot/...
+ EOF
+
+ p4 tag -l long_label ... &&
+
+ p4 labels ... &&
+
+ "$GITP4" clone --dest="$git" --detect-labels //depot@all &&
+ cd "$git" &&
+
+ git tag &&
+ git tag >taglist &&
+ test_line_count = 3 taglist &&
+
+ cd main &&
+ git checkout tag_tag_f1_only &&
+ ! test -f f2 &&
+ git checkout tag_tag_with\$_shell_char &&
+ test -f f1 && test -f f2 && test -f file_with_\$metachar &&
+
+ git show tag_long_label | grep -q "A Label second line"
+ )
+'
+
+# Test some label corner cases:
+#
+# - two tags on the same file; both should be available
+# - a tag that is only on one file; this kind of tag
+# cannot be imported (at least not easily).
+
+test_expect_failure 'two labels on the same changelist' '
+ test_when_finished cleanup_git &&
+ (
+ cd "$cli" &&
+ mkdir -p main &&
+
+ p4 edit main/f1 main/f2 &&
+ echo "hello world" >main/f1 &&
+ echo "not in the tag" >main/f2 &&
+ p4 submit -d "main/f[12]: testing two labels" &&
+
+ p4 tag -l tag_f1_1 main/... &&
+ p4 tag -l tag_f1_2 main/... &&
+
+ p4 labels ... &&
+
+ "$GITP4" clone --dest="$git" --detect-labels //depot@all &&
+ cd "$git" &&
+
+ git tag | grep tag_f1 &&
+ git tag | grep -q tag_f1_1 &&
+ git tag | grep -q tag_f1_2 &&
+
+ cd main &&
+
+ git checkout tag_tag_f1_1 &&
+ ls &&
+ test -f f1 &&
+
+ git checkout tag_tag_f1_2 &&
+ ls &&
+ test -f f1
+ )
+'
+
+test_expect_success 'kill p4d' '
+ kill_p4d
+'
+
+test_done
diff --git a/t/t9806-git-p4-options.sh b/t/t9806-git-p4-options.sh
index 1f1952a657..0571602129 100755
--- a/t/t9806-git-p4-options.sh
+++ b/t/t9806-git-p4-options.sh
@@ -146,7 +146,7 @@ test_expect_success 'clone --use-client-spec' '
(
cd "$git" &&
test_path_is_file bus/dir/f4 &&
- test_path_is_file file1
+ test_path_is_missing file1
) &&
cleanup_git &&
@@ -159,7 +159,7 @@ test_expect_success 'clone --use-client-spec' '
"$GITP4" sync //depot/... &&
git checkout -b master p4/master &&
test_path_is_file bus/dir/f4 &&
- test_path_is_file file1
+ test_path_is_missing file1
)
'
diff --git a/t/t9809-git-p4-client-view.sh b/t/t9809-git-p4-client-view.sh
index c9471d562d..ae9145e307 100755
--- a/t/t9809-git-p4-client-view.sh
+++ b/t/t9809-git-p4-client-view.sh
@@ -101,12 +101,18 @@ test_expect_success 'unsupported view wildcard *' '
test_must_fail "$GITP4" clone --use-client-spec --dest="$git" //depot
'
-test_expect_success 'wildcard ... only supported at end of spec' '
+test_expect_success 'wildcard ... only supported at end of spec 1' '
client_view "//depot/.../file11 //client/.../file11" &&
test_when_finished cleanup_git &&
test_must_fail "$GITP4" clone --use-client-spec --dest="$git" //depot
'
+test_expect_success 'wildcard ... only supported at end of spec 2' '
+ client_view "//depot/.../a/... //client/.../a/..." &&
+ test_when_finished cleanup_git &&
+ test_must_fail "$GITP4" clone --use-client-spec --dest="$git" //depot
+'
+
test_expect_success 'basic map' '
client_view "//depot/dir1/... //client/cli1/..." &&
files="cli1/file11 cli1/file12" &&
@@ -241,6 +247,393 @@ test_expect_success 'quotes on rhs only' '
'
#
+# What happens when two files of the same name are overlaid together?
+# The last-listed file should take precedence.
+#
+# //depot
+# - dir1
+# - file11
+# - file12
+# - filecollide
+# - dir2
+# - file21
+# - file22
+# - filecollide
+#
+test_expect_success 'overlay collision setup' '
+ client_view "//depot/... //client/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir1/filecollide >dir1/filecollide &&
+ p4 add dir1/filecollide &&
+ p4 submit -d dir1/filecollide &&
+ echo dir2/filecollide >dir2/filecollide &&
+ p4 add dir2/filecollide &&
+ p4 submit -d dir2/filecollide
+ )
+'
+
+test_expect_success 'overlay collision 1 to 2' '
+ client_view "//depot/dir1/... //client/..." \
+ "+//depot/dir2/... //client/..." &&
+ files="file11 file12 file21 file22 filecollide" &&
+ echo dir2/filecollide >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/filecollide &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files &&
+ test_cmp actual "$git"/filecollide
+'
+
+test_expect_failure 'overlay collision 2 to 1' '
+ client_view "//depot/dir2/... //client/..." \
+ "+//depot/dir1/... //client/..." &&
+ files="file11 file12 file21 file22 filecollide" &&
+ echo dir1/filecollide >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/filecollide &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files &&
+ test_cmp actual "$git"/filecollide
+'
+
+test_expect_success 'overlay collision delete 2' '
+ client_view "//depot/... //client/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ p4 delete dir2/filecollide &&
+ p4 submit -d "remove dir2/filecollide"
+ )
+'
+
+# no filecollide, got deleted with dir2
+test_expect_failure 'overlay collision 1 to 2, but 2 deleted' '
+ client_view "//depot/dir1/... //client/..." \
+ "+//depot/dir2/... //client/..." &&
+ files="file11 file12 file21 file22" &&
+ client_verify $files &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files
+'
+
+test_expect_success 'overlay collision update 1' '
+ client_view "//depot/dir1/... //client/dir1/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ p4 open dir1/filecollide &&
+ echo dir1/filecollide update >dir1/filecollide &&
+ p4 submit -d "update dir1/filecollide"
+ )
+'
+
+# still no filecollide, dir2 still wins with the deletion even though the
+# change to dir1 is more recent
+test_expect_failure 'overlay collision 1 to 2, but 2 deleted, then 1 updated' '
+ client_view "//depot/dir1/... //client/..." \
+ "+//depot/dir2/... //client/..." &&
+ files="file11 file12 file21 file22" &&
+ client_verify $files &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files
+'
+
+test_expect_success 'overlay collision delete filecollides' '
+ client_view "//depot/... //client/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ p4 delete dir1/filecollide dir2/filecollide &&
+ p4 submit -d "remove filecollides"
+ )
+'
+
+#
+# Overlays as part of sync, rather than initial checkout:
+# 1. add a file in dir1
+# 2. sync to include it
+# 3. add same file in dir2
+# 4. sync, make sure content switches as dir2 has priority
+# 5. add another file in dir1
+# 6. sync
+# 7. add/delete same file in dir2
+# 8. sync, make sure it disappears, again dir2 wins
+# 9. cleanup
+#
+# //depot
+# - dir1
+# - file11
+# - file12
+# - colA
+# - colB
+# - dir2
+# - file21
+# - file22
+# - colA
+# - colB
+#
+test_expect_success 'overlay sync: add colA in dir1' '
+ client_view "//depot/dir1/... //client/dir1/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir1/colA >dir1/colA &&
+ p4 add dir1/colA &&
+ p4 submit -d dir1/colA
+ )
+'
+
+test_expect_success 'overlay sync: initial git checkout' '
+ client_view "//depot/dir1/... //client/..." \
+ "+//depot/dir2/... //client/..." &&
+ files="file11 file12 file21 file22 colA" &&
+ echo dir1/colA >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/colA &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files &&
+ test_cmp actual "$git"/colA
+'
+
+test_expect_success 'overlay sync: add colA in dir2' '
+ client_view "//depot/dir2/... //client/dir2/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir2/colA >dir2/colA &&
+ p4 add dir2/colA &&
+ p4 submit -d dir2/colA
+ )
+'
+
+test_expect_success 'overlay sync: colA content switch' '
+ client_view "//depot/dir1/... //client/..." \
+ "+//depot/dir2/... //client/..." &&
+ files="file11 file12 file21 file22 colA" &&
+ echo dir2/colA >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/colA &&
+ (
+ cd "$git" &&
+ "$GITP4" sync --use-client-spec &&
+ git merge --ff-only p4/master
+ ) &&
+ git_verify $files &&
+ test_cmp actual "$git"/colA
+'
+
+test_expect_success 'overlay sync: add colB in dir1' '
+ client_view "//depot/dir1/... //client/dir1/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir1/colB >dir1/colB &&
+ p4 add dir1/colB &&
+ p4 submit -d dir1/colB
+ )
+'
+
+test_expect_success 'overlay sync: colB appears' '
+ client_view "//depot/dir1/... //client/..." \
+ "+//depot/dir2/... //client/..." &&
+ files="file11 file12 file21 file22 colA colB" &&
+ echo dir1/colB >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/colB &&
+ (
+ cd "$git" &&
+ "$GITP4" sync --use-client-spec &&
+ git merge --ff-only p4/master
+ ) &&
+ git_verify $files &&
+ test_cmp actual "$git"/colB
+'
+
+test_expect_success 'overlay sync: add/delete colB in dir2' '
+ client_view "//depot/dir2/... //client/dir2/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir2/colB >dir2/colB &&
+ p4 add dir2/colB &&
+ p4 submit -d dir2/colB &&
+ p4 delete dir2/colB &&
+ p4 submit -d "delete dir2/colB"
+ )
+'
+
+test_expect_success 'overlay sync: colB disappears' '
+ client_view "//depot/dir1/... //client/..." \
+ "+//depot/dir2/... //client/..." &&
+ files="file11 file12 file21 file22 colA" &&
+ client_verify $files &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ "$GITP4" sync --use-client-spec &&
+ git merge --ff-only p4/master
+ ) &&
+ git_verify $files
+'
+
+test_expect_success 'overlay sync: cleanup' '
+ client_view "//depot/... //client/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ p4 delete dir1/colA dir2/colA dir1/colB &&
+ p4 submit -d "remove overlay sync files"
+ )
+'
+
+#
+# Overlay tests again, but swapped so dir1 has priority.
+# 1. add a file in dir1
+# 2. sync to include it
+# 3. add same file in dir2
+# 4. sync, make sure content does not switch
+# 5. add another file in dir1
+# 6. sync
+# 7. add/delete same file in dir2
+# 8. sync, make sure it is still there
+# 9. cleanup
+#
+# //depot
+# - dir1
+# - file11
+# - file12
+# - colA
+# - colB
+# - dir2
+# - file21
+# - file22
+# - colA
+# - colB
+#
+test_expect_success 'overlay sync swap: add colA in dir1' '
+ client_view "//depot/dir1/... //client/dir1/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir1/colA >dir1/colA &&
+ p4 add dir1/colA &&
+ p4 submit -d dir1/colA
+ )
+'
+
+test_expect_success 'overlay sync swap: initial git checkout' '
+ client_view "//depot/dir2/... //client/..." \
+ "+//depot/dir1/... //client/..." &&
+ files="file11 file12 file21 file22 colA" &&
+ echo dir1/colA >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/colA &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files &&
+ test_cmp actual "$git"/colA
+'
+
+test_expect_success 'overlay sync swap: add colA in dir2' '
+ client_view "//depot/dir2/... //client/dir2/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir2/colA >dir2/colA &&
+ p4 add dir2/colA &&
+ p4 submit -d dir2/colA
+ )
+'
+
+test_expect_failure 'overlay sync swap: colA no content switch' '
+ client_view "//depot/dir2/... //client/..." \
+ "+//depot/dir1/... //client/..." &&
+ files="file11 file12 file21 file22 colA" &&
+ echo dir1/colA >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/colA &&
+ (
+ cd "$git" &&
+ "$GITP4" sync --use-client-spec &&
+ git merge --ff-only p4/master
+ ) &&
+ git_verify $files &&
+ test_cmp actual "$git"/colA
+'
+
+test_expect_success 'overlay sync swap: add colB in dir1' '
+ client_view "//depot/dir1/... //client/dir1/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir1/colB >dir1/colB &&
+ p4 add dir1/colB &&
+ p4 submit -d dir1/colB
+ )
+'
+
+test_expect_success 'overlay sync swap: colB appears' '
+ client_view "//depot/dir2/... //client/..." \
+ "+//depot/dir1/... //client/..." &&
+ files="file11 file12 file21 file22 colA colB" &&
+ echo dir1/colB >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/colB &&
+ (
+ cd "$git" &&
+ "$GITP4" sync --use-client-spec &&
+ git merge --ff-only p4/master
+ ) &&
+ git_verify $files &&
+ test_cmp actual "$git"/colB
+'
+
+test_expect_success 'overlay sync swap: add/delete colB in dir2' '
+ client_view "//depot/dir2/... //client/dir2/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir2/colB >dir2/colB &&
+ p4 add dir2/colB &&
+ p4 submit -d dir2/colB &&
+ p4 delete dir2/colB &&
+ p4 submit -d "delete dir2/colB"
+ )
+'
+
+test_expect_failure 'overlay sync swap: colB no change' '
+ client_view "//depot/dir2/... //client/..." \
+ "+//depot/dir1/... //client/..." &&
+ files="file11 file12 file21 file22 colA colB" &&
+ echo dir1/colB >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/colB &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ "$GITP4" sync --use-client-spec &&
+ git merge --ff-only p4/master
+ ) &&
+ git_verify $files &&
+ test_cmp actual "$cli"/colB
+'
+
+test_expect_success 'overlay sync swap: cleanup' '
+ client_view "//depot/... //client/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ p4 delete dir1/colA dir2/colA dir1/colB &&
+ p4 submit -d "remove overlay sync files"
+ )
+'
+
+#
# Rename directories to test quoting in depot-side mappings
# //depot
# - "dir 1"
diff --git a/t/test-lib.sh b/t/test-lib.sh
index a65dfc7ea9..e28d5fdebe 100644
--- a/t/test-lib.sh
+++ b/t/test-lib.sh
@@ -64,7 +64,8 @@ GIT_AUTHOR_NAME='A U Thor'
GIT_COMMITTER_EMAIL=committer@example.com
GIT_COMMITTER_NAME='C O Mitter'
GIT_MERGE_VERBOSITY=5
-export GIT_MERGE_VERBOSITY
+GIT_MERGE_AUTOEDIT=no
+export GIT_MERGE_VERBOSITY GIT_MERGE_AUTOEDIT
export GIT_AUTHOR_EMAIL GIT_AUTHOR_NAME
export GIT_COMMITTER_EMAIL GIT_COMMITTER_NAME
export EDITOR
@@ -329,6 +330,19 @@ test_tick () {
export GIT_COMMITTER_DATE GIT_AUTHOR_DATE
}
+# Stop execution and start a shell. This is useful for debugging tests and
+# only makes sense together with "-v".
+#
+# Be sure to remove all invocations of this command before submitting.
+
+test_pause () {
+ if test "$verbose" = t; then
+ "$SHELL_PATH" <&6 >&3 2>&4
+ else
+ error >&5 "test_pause requires --verbose"
+ fi
+}
+
# Call test_commit with the arguments "<message> [<file> [<contents>]]"
#
# This will commit a file with the given contents and the given commit
@@ -381,11 +395,20 @@ test_config () {
git config "$@"
}
+
test_config_global () {
test_when_finished "test_unconfig --global '$1'" &&
git config --global "$@"
}
+write_script () {
+ {
+ echo "#!${2-"$SHELL_PATH"}" &&
+ cat
+ } >"$1" &&
+ chmod +x "$1"
+}
+
# Use test_set_prereq to tell that a particular prerequisite is available.
# The prerequisite can later be checked for in two ways:
#
diff --git a/tag.c b/tag.c
index 3aa186df62..78d272b863 100644
--- a/tag.c
+++ b/tag.c
@@ -24,6 +24,18 @@ struct object *deref_tag(struct object *o, const char *warn, int warnlen)
return o;
}
+struct object *deref_tag_noverify(struct object *o)
+{
+ while (o && o->type == OBJ_TAG) {
+ o = parse_object(o->sha1);
+ if (o && o->type == OBJ_TAG && ((struct tag *)o)->tagged)
+ o = ((struct tag *)o)->tagged;
+ else
+ o = NULL;
+ }
+ return o;
+}
+
struct tag *lookup_tag(const unsigned char *sha1)
{
struct object *obj = lookup_object(sha1);
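deref_tag_noverify() peels nested tags without re-verifying each intermediate tag object, which is what the lazier ref advertisement in upload-pack.c below relies on. A minimal caller sketch in that spirit (illustrative only, not part of the patch):

	unsigned char sha1[20];	/* object name of a (possibly nested) tag, filled in elsewhere */
	struct object *o = parse_object(sha1);
	struct object *peeled = deref_tag_noverify(o);

	if (peeled)
		printf("%s^{}\n", sha1_to_hex(peeled->sha1));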
diff --git a/tag.h b/tag.h
index 5ee88e6550..bc8a1e40f0 100644
--- a/tag.h
+++ b/tag.h
@@ -16,6 +16,7 @@ extern struct tag *lookup_tag(const unsigned char *sha1);
extern int parse_tag_buffer(struct tag *item, const void *data, unsigned long size);
extern int parse_tag(struct tag *item);
extern struct object *deref_tag(struct object *, const char *, int);
+extern struct object *deref_tag_noverify(struct object *);
extern size_t parse_signature(const char *buf, unsigned long size);
#endif /* TAG_H */
diff --git a/test-obj-pool.c b/test-obj-pool.c
deleted file mode 100644
index 5018863ef5..0000000000
--- a/test-obj-pool.c
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * test-obj-pool.c: code to exercise the svn importer's object pool
- */
-
-#include "cache.h"
-#include "vcs-svn/obj_pool.h"
-
-enum pool { POOL_ONE, POOL_TWO };
-obj_pool_gen(one, int, 1)
-obj_pool_gen(two, int, 4096)
-
-static uint32_t strtouint32(const char *s)
-{
- char *end;
- uintmax_t n = strtoumax(s, &end, 10);
- if (*s == '\0' || (*end != '\n' && *end != '\0'))
- die("invalid offset: %s", s);
- return (uint32_t) n;
-}
-
-static void handle_command(const char *command, enum pool pool, const char *arg)
-{
- switch (*command) {
- case 'a':
- if (!prefixcmp(command, "alloc ")) {
- uint32_t n = strtouint32(arg);
- printf("%"PRIu32"\n",
- pool == POOL_ONE ?
- one_alloc(n) : two_alloc(n));
- return;
- }
- case 'c':
- if (!prefixcmp(command, "commit ")) {
- pool == POOL_ONE ? one_commit() : two_commit();
- return;
- }
- if (!prefixcmp(command, "committed ")) {
- printf("%"PRIu32"\n",
- pool == POOL_ONE ?
- one_pool.committed : two_pool.committed);
- return;
- }
- case 'f':
- if (!prefixcmp(command, "free ")) {
- uint32_t n = strtouint32(arg);
- pool == POOL_ONE ? one_free(n) : two_free(n);
- return;
- }
- case 'n':
- if (!prefixcmp(command, "null ")) {
- printf("%"PRIu32"\n",
- pool == POOL_ONE ?
- one_offset(NULL) : two_offset(NULL));
- return;
- }
- case 'o':
- if (!prefixcmp(command, "offset ")) {
- uint32_t n = strtouint32(arg);
- printf("%"PRIu32"\n",
- pool == POOL_ONE ?
- one_offset(one_pointer(n)) :
- two_offset(two_pointer(n)));
- return;
- }
- case 'r':
- if (!prefixcmp(command, "reset ")) {
- pool == POOL_ONE ? one_reset() : two_reset();
- return;
- }
- case 's':
- if (!prefixcmp(command, "set ")) {
- uint32_t n = strtouint32(arg);
- if (pool == POOL_ONE)
- *one_pointer(n) = 1;
- else
- *two_pointer(n) = 1;
- return;
- }
- case 't':
- if (!prefixcmp(command, "test ")) {
- uint32_t n = strtouint32(arg);
- printf("%d\n", pool == POOL_ONE ?
- *one_pointer(n) : *two_pointer(n));
- return;
- }
- default:
- die("unrecognized command: %s", command);
- }
-}
-
-static void handle_line(const char *line)
-{
- const char *arg = strchr(line, ' ');
- enum pool pool;
-
- if (arg && !prefixcmp(arg + 1, "one"))
- pool = POOL_ONE;
- else if (arg && !prefixcmp(arg + 1, "two"))
- pool = POOL_TWO;
- else
- die("no pool specified: %s", line);
-
- handle_command(line, pool, arg + strlen("one "));
-}
-
-int main(int argc, char *argv[])
-{
- struct strbuf sb = STRBUF_INIT;
- if (argc != 1)
- usage("test-obj-str < script");
-
- while (strbuf_getline(&sb, stdin, '\n') != EOF)
- handle_line(sb.buf);
- strbuf_release(&sb);
- return 0;
-}
diff --git a/test-string-pool.c b/test-string-pool.c
deleted file mode 100644
index c5782e6bce..0000000000
--- a/test-string-pool.c
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * test-string-pool.c: code to exercise the svn importer's string pool
- */
-
-#include "git-compat-util.h"
-#include "vcs-svn/string_pool.h"
-
-int main(int argc, char *argv[])
-{
- const uint32_t unequal = pool_intern("does not equal");
- const uint32_t equal = pool_intern("equals");
- uint32_t buf[3];
- uint32_t n;
-
- if (argc != 2)
- usage("test-string-pool <string>,<string>");
-
- n = pool_tok_seq(3, buf, ",-", argv[1]);
- if (n >= 3)
- die("too many strings");
- if (n <= 1)
- die("too few strings");
-
- buf[2] = buf[1];
- buf[1] = (buf[0] == buf[2]) ? equal : unequal;
- pool_print_seq(3, buf, ' ', stdout);
- fputc('\n', stdout);
-
- pool_reset();
- return 0;
-}
diff --git a/test-svn-fe.c b/test-svn-fe.c
index b42ba789b1..332a5f711d 100644
--- a/test-svn-fe.c
+++ b/test-svn-fe.c
@@ -4,15 +4,51 @@
#include "git-compat-util.h"
#include "vcs-svn/svndump.h"
+#include "vcs-svn/svndiff.h"
+#include "vcs-svn/sliding_window.h"
+#include "vcs-svn/line_buffer.h"
-int main(int argc, char *argv[])
+static const char test_svnfe_usage[] =
+ "test-svn-fe (<dumpfile> | [-d] <preimage> <delta> <len>)";
+
+static int apply_delta(int argc, char *argv[])
{
- if (argc != 2)
- usage("test-svn-fe <file>");
- if (svndump_init(argv[1]))
+ struct line_buffer preimage = LINE_BUFFER_INIT;
+ struct line_buffer delta = LINE_BUFFER_INIT;
+ struct sliding_view preimage_view = SLIDING_VIEW_INIT(&preimage, -1);
+
+ if (argc != 5)
+ usage(test_svnfe_usage);
+
+ if (buffer_init(&preimage, argv[2]))
+ die_errno("cannot open preimage");
+ if (buffer_init(&delta, argv[3]))
+ die_errno("cannot open delta");
+ if (svndiff0_apply(&delta, (off_t) strtoull(argv[4], NULL, 0),
+ &preimage_view, stdout))
return 1;
- svndump_read(NULL);
- svndump_deinit();
- svndump_reset();
+ if (buffer_deinit(&preimage))
+ die_errno("cannot close preimage");
+ if (buffer_deinit(&delta))
+ die_errno("cannot close delta");
+ buffer_reset(&preimage);
+ strbuf_release(&preimage_view.buf);
+ buffer_reset(&delta);
return 0;
}
+
+int main(int argc, char *argv[])
+{
+ if (argc == 2) {
+ if (svndump_init(argv[1]))
+ return 1;
+ svndump_read(NULL);
+ svndump_deinit();
+ svndump_reset();
+ return 0;
+ }
+
+ if (argc >= 2 && !strcmp(argv[1], "-d"))
+ return apply_delta(argc, argv);
+ usage(test_svnfe_usage);
+}
diff --git a/test-treap.c b/test-treap.c
deleted file mode 100644
index 294d7ee273..0000000000
--- a/test-treap.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * test-treap.c: code to exercise the svn importer's treap structure
- */
-
-#include "cache.h"
-#include "vcs-svn/obj_pool.h"
-#include "vcs-svn/trp.h"
-
-struct int_node {
- uintmax_t n;
- struct trp_node children;
-};
-
-obj_pool_gen(node, struct int_node, 3)
-
-static int node_cmp(struct int_node *a, struct int_node *b)
-{
- return (a->n > b->n) - (a->n < b->n);
-}
-
-trp_gen(static, treap_, struct int_node, children, node, node_cmp)
-
-static void strtonode(struct int_node *item, const char *s)
-{
- char *end;
- item->n = strtoumax(s, &end, 10);
- if (*s == '\0' || (*end != '\n' && *end != '\0'))
- die("invalid integer: %s", s);
-}
-
-int main(int argc, char *argv[])
-{
- struct strbuf sb = STRBUF_INIT;
- struct trp_root root = { ~0U };
- uint32_t item;
-
- if (argc != 1)
- usage("test-treap < ints");
-
- while (strbuf_getline(&sb, stdin, '\n') != EOF) {
- struct int_node *node = node_pointer(node_alloc(1));
-
- item = node_offset(node);
- strtonode(node, sb.buf);
- node = treap_insert(&root, node_pointer(item));
- if (node_offset(node) != item)
- die("inserted %"PRIu32" in place of %"PRIu32"",
- node_offset(node), item);
- }
-
- item = node_offset(treap_first(&root));
- while (~item) {
- uint32_t next;
- struct int_node *tmp = node_pointer(node_alloc(1));
-
- tmp->n = node_pointer(item)->n;
- next = node_offset(treap_next(&root, node_pointer(item)));
-
- treap_remove(&root, node_pointer(item));
- item = node_offset(treap_nsearch(&root, tmp));
-
- if (item != next && (!~item || node_pointer(item)->n != tmp->n))
- die("found %"PRIuMAX" in place of %"PRIuMAX"",
- ~item ? node_pointer(item)->n : ~(uintmax_t) 0,
- ~next ? node_pointer(next)->n : ~(uintmax_t) 0);
- printf("%"PRIuMAX"\n", tmp->n);
- }
- node_reset();
- return 0;
-}
diff --git a/upload-pack.c b/upload-pack.c
index 6f36f6255c..bb08e2eb0d 100644
--- a/upload-pack.c
+++ b/upload-pack.c
@@ -585,6 +585,7 @@ static void receive_needs(void)
write_str_in_full(debug_fd, "#S\n");
for (;;) {
struct object *o;
+ const char *features;
unsigned char sha1_buf[20];
len = packet_read_line(0, line, sizeof(line));
reset_timeout();
@@ -616,23 +617,26 @@ static void receive_needs(void)
get_sha1_hex(line+5, sha1_buf))
die("git upload-pack: protocol error, "
"expected to get sha, not '%s'", line);
- if (strstr(line+45, "multi_ack_detailed"))
+
+ features = line + 45;
+
+ if (parse_feature_request(features, "multi_ack_detailed"))
multi_ack = 2;
- else if (strstr(line+45, "multi_ack"))
+ else if (parse_feature_request(features, "multi_ack"))
multi_ack = 1;
- if (strstr(line+45, "no-done"))
+ if (parse_feature_request(features, "no-done"))
no_done = 1;
- if (strstr(line+45, "thin-pack"))
+ if (parse_feature_request(features, "thin-pack"))
use_thin_pack = 1;
- if (strstr(line+45, "ofs-delta"))
+ if (parse_feature_request(features, "ofs-delta"))
use_ofs_delta = 1;
- if (strstr(line+45, "side-band-64k"))
+ if (parse_feature_request(features, "side-band-64k"))
use_sideband = LARGE_PACKET_MAX;
- else if (strstr(line+45, "side-band"))
+ else if (parse_feature_request(features, "side-band"))
use_sideband = DEFAULT_PACKET_MAX;
- if (strstr(line+45, "no-progress"))
+ if (parse_feature_request(features, "no-progress"))
no_progress = 1;
- if (strstr(line+45, "include-tag"))
+ if (parse_feature_request(features, "include-tag"))
use_include_tag = 1;
o = lookup_object(sha1_buf);
@@ -720,11 +724,14 @@ static int send_ref(const char *refname, const unsigned char *sha1, int flag, vo
static const char *capabilities = "multi_ack thin-pack side-band"
" side-band-64k ofs-delta shallow no-progress"
" include-tag multi_ack_detailed";
- struct object *o = parse_object(sha1);
+ struct object *o = lookup_unknown_object(sha1);
const char *refname_nons = strip_namespace(refname);
- if (!o)
- die("git upload-pack: cannot find object %s:", sha1_to_hex(sha1));
+ if (o->type == OBJ_NONE) {
+ o->type = sha1_object_info(sha1, NULL);
+ if (o->type < 0)
+ die("git upload-pack: cannot find object %s:", sha1_to_hex(sha1));
+ }
if (capabilities)
packet_write(1, "%s %s%c%s%s\n", sha1_to_hex(sha1), refname_nons,
@@ -738,7 +745,7 @@ static int send_ref(const char *refname, const unsigned char *sha1, int flag, vo
nr_our_refs++;
}
if (o->type == OBJ_TAG) {
- o = deref_tag(o, refname, 0);
+ o = deref_tag_noverify(o);
if (o)
packet_write(1, "%s %s^{}\n", sha1_to_hex(o->sha1), refname_nons);
}
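Two changes above are worth calling out: send_ref() now looks objects up lazily (lookup_unknown_object() plus sha1_object_info() only when the type is unknown), and capability parsing stops using strstr(), which can false-match one capability name inside another, for example "side-band" inside "side-band-64k". A rough sketch of whole-token matching in the spirit of parse_feature_request(), illustrative only and not the actual implementation from connect.c:

	/* Return 1 if "feature" occurs as a whole, space-delimited token. */
	static int has_feature(const char *features, const char *feature)
	{
		size_t len = strlen(feature);
		const char *p = features;

		while ((p = strstr(p, feature)) != NULL) {
			if ((p == features || p[-1] == ' ') &&
			    (p[len] == '\0' || p[len] == ' ' || p[len] == '\n'))
				return 1;
			p += len;
		}
		return 0;
	}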
diff --git a/vcs-svn/LICENSE b/vcs-svn/LICENSE
index 0a5e3c43a0..eb91858b82 100644
--- a/vcs-svn/LICENSE
+++ b/vcs-svn/LICENSE
@@ -1,8 +1,7 @@
Copyright (C) 2010 David Barr <david.barr@cordelta.com>.
All rights reserved.
-Copyright (C) 2008 Jason Evans <jasone@canonware.com>.
-All rights reserved.
+Copyright (C) 2010 Jonathan Nieder <jrnieder@gmail.com>.
Copyright (C) 2005 Stefan Hegny, hydrografix Consulting GmbH,
Frankfurt/Main, Germany
diff --git a/vcs-svn/fast_export.c b/vcs-svn/fast_export.c
index 99ed70b88a..b823b8519c 100644
--- a/vcs-svn/fast_export.c
+++ b/vcs-svn/fast_export.c
@@ -4,34 +4,77 @@
*/
#include "git-compat-util.h"
+#include "strbuf.h"
+#include "quote.h"
#include "fast_export.h"
-#include "line_buffer.h"
#include "repo_tree.h"
-#include "string_pool.h"
+#include "strbuf.h"
+#include "svndiff.h"
+#include "sliding_window.h"
+#include "line_buffer.h"
#define MAX_GITSVN_LINE_LEN 4096
static uint32_t first_commit_done;
+static struct line_buffer postimage = LINE_BUFFER_INIT;
+static struct line_buffer report_buffer = LINE_BUFFER_INIT;
+
+/* NEEDSWORK: move to fast_export_init() */
+static int init_postimage(void)
+{
+ static int postimage_initialized;
+ if (postimage_initialized)
+ return 0;
+ postimage_initialized = 1;
+ return buffer_tmpfile_init(&postimage);
+}
+
+void fast_export_init(int fd)
+{
+ first_commit_done = 0;
+ if (buffer_fdinit(&report_buffer, fd))
+ die_errno("cannot read from file descriptor %d", fd);
+}
+
+void fast_export_deinit(void)
+{
+ if (buffer_deinit(&report_buffer))
+ die_errno("error closing fast-import feedback stream");
+}
+
+void fast_export_reset(void)
+{
+ buffer_reset(&report_buffer);
+}
-void fast_export_delete(uint32_t depth, uint32_t *path)
+void fast_export_delete(const char *path)
{
putchar('D');
putchar(' ');
- pool_print_seq(depth, path, '/', stdout);
+ quote_c_style(path, NULL, stdout, 0);
putchar('\n');
}
-void fast_export_modify(uint32_t depth, uint32_t *path, uint32_t mode,
- uint32_t mark)
+static void fast_export_truncate(const char *path, uint32_t mode)
+{
+ fast_export_modify(path, mode, "inline");
+ printf("data 0\n\n");
+}
+
+void fast_export_modify(const char *path, uint32_t mode, const char *dataref)
{
/* Mode must be 100644, 100755, 120000, or 160000. */
- printf("M %06"PRIo32" :%"PRIu32" ", mode, mark);
- pool_print_seq(depth, path, '/', stdout);
+ if (!dataref) {
+ fast_export_truncate(path, mode);
+ return;
+ }
+ printf("M %06"PRIo32" %s ", mode, dataref);
+ quote_c_style(path, NULL, stdout, 0);
putchar('\n');
}
static char gitsvnline[MAX_GITSVN_LINE_LEN];
-void fast_export_commit(uint32_t revision, const char *author,
+void fast_export_begin_commit(uint32_t revision, const char *author,
const struct strbuf *log,
const char *uuid, const char *url,
unsigned long timestamp)
@@ -47,6 +90,7 @@ void fast_export_commit(uint32_t revision, const char *author,
*gitsvnline = '\0';
}
printf("commit refs/heads/master\n");
+ printf("mark :%"PRIu32"\n", revision);
printf("committer %s <%s@%s> %ld +0000\n",
*author ? author : "nobody",
*author ? author : "nobody",
@@ -57,15 +101,44 @@ void fast_export_commit(uint32_t revision, const char *author,
printf("%s\n", gitsvnline);
if (!first_commit_done) {
if (revision > 1)
- printf("from refs/heads/master^0\n");
+ printf("from :%"PRIu32"\n", revision - 1);
first_commit_done = 1;
}
- repo_diff(revision - 1, revision);
- fputc('\n', stdout);
+}
+void fast_export_end_commit(uint32_t revision)
+{
printf("progress Imported commit %"PRIu32".\n\n", revision);
}
+static void ls_from_rev(uint32_t rev, const char *path)
+{
+ /* ls :5 path/to/old/file */
+ printf("ls :%"PRIu32" ", rev);
+ quote_c_style(path, NULL, stdout, 0);
+ putchar('\n');
+ fflush(stdout);
+}
+
+static void ls_from_active_commit(const char *path)
+{
+ /* ls "path/to/file" */
+ printf("ls \"");
+ quote_c_style(path, NULL, stdout, 1);
+ printf("\"\n");
+ fflush(stdout);
+}
+
+static const char *get_response_line(void)
+{
+ const char *line = buffer_read_line(&report_buffer);
+ if (line)
+ return line;
+ if (buffer_ferror(&report_buffer))
+ die_errno("error reading from fast-import");
+ die("unexpected end of fast-import feedback");
+}
+
static void die_short_read(struct line_buffer *input)
{
if (buffer_ferror(input))
@@ -73,16 +146,171 @@ static void die_short_read(struct line_buffer *input)
die("invalid dump: unexpected end of file");
}
-void fast_export_blob(uint32_t mode, uint32_t mark, uint32_t len, struct line_buffer *input)
+static int ends_with(const char *s, size_t len, const char *suffix)
{
+ const size_t suffixlen = strlen(suffix);
+ if (len < suffixlen)
+ return 0;
+ return !memcmp(s + len - suffixlen, suffix, suffixlen);
+}
+
+static int parse_cat_response_line(const char *header, off_t *len)
+{
+ size_t headerlen = strlen(header);
+ uintmax_t n;
+ const char *type;
+ const char *end;
+
+ if (ends_with(header, headerlen, " missing"))
+ return error("cat-blob reports missing blob: %s", header);
+ type = memmem(header, headerlen, " blob ", strlen(" blob "));
+ if (!type)
+ return error("cat-blob header has wrong object type: %s", header);
+ n = strtoumax(type + strlen(" blob "), (char **) &end, 10);
+ if (end == type + strlen(" blob "))
+ return error("cat-blob header does not contain length: %s", header);
+ if (memchr(type + strlen(" blob "), '-', end - type - strlen(" blob ")))
+ return error("cat-blob header contains negative length: %s", header);
+ if (n == UINTMAX_MAX || n > maximum_signed_value_of_type(off_t))
+ return error("blob too large for current definition of off_t");
+ *len = n;
+ if (*end)
+ return error("cat-blob header contains garbage after length: %s", header);
+ return 0;
+}
+
+static void check_preimage_overflow(off_t a, off_t b)
+{
+ if (signed_add_overflows(a, b))
+ die("blob too large for current definition of off_t");
+}
+
+static long apply_delta(off_t len, struct line_buffer *input,
+ const char *old_data, uint32_t old_mode)
+{
+ long ret;
+ struct sliding_view preimage = SLIDING_VIEW_INIT(&report_buffer, 0);
+ FILE *out;
+
+ if (init_postimage() || !(out = buffer_tmpfile_rewind(&postimage)))
+ die("cannot open temporary file for blob retrieval");
+ if (old_data) {
+ const char *response;
+ printf("cat-blob %s\n", old_data);
+ fflush(stdout);
+ response = get_response_line();
+ if (parse_cat_response_line(response, &preimage.max_off))
+ die("invalid cat-blob response: %s", response);
+ check_preimage_overflow(preimage.max_off, 1);
+ }
+ if (old_mode == REPO_MODE_LNK) {
+ strbuf_addstr(&preimage.buf, "link ");
+ check_preimage_overflow(preimage.max_off, strlen("link "));
+ preimage.max_off += strlen("link ");
+ check_preimage_overflow(preimage.max_off, 1);
+ }
+ if (svndiff0_apply(input, len, &preimage, out))
+ die("cannot apply delta");
+ if (old_data) {
+ /* Read the remainder of preimage and trailing newline. */
+ assert(!signed_add_overflows(preimage.max_off, 1));
+ preimage.max_off++; /* room for newline */
+ if (move_window(&preimage, preimage.max_off - 1, 1))
+ die("cannot seek to end of input");
+ if (preimage.buf.buf[0] != '\n')
+ die("missing newline after cat-blob response");
+ }
+ ret = buffer_tmpfile_prepare_to_read(&postimage);
+ if (ret < 0)
+ die("cannot read temporary file for blob retrieval");
+ strbuf_release(&preimage.buf);
+ return ret;
+}
+
+void fast_export_data(uint32_t mode, off_t len, struct line_buffer *input)
+{
+ assert(len >= 0);
if (mode == REPO_MODE_LNK) {
/* svn symlink blobs start with "link " */
+ if (len < 5)
+ die("invalid dump: symlink too short for \"link\" prefix");
len -= 5;
if (buffer_skip_bytes(input, 5) != 5)
die_short_read(input);
}
- printf("blob\nmark :%"PRIu32"\ndata %"PRIu32"\n", mark, len);
+ printf("data %"PRIuMAX"\n", (uintmax_t) len);
if (buffer_copy_bytes(input, len) != len)
die_short_read(input);
fputc('\n', stdout);
}
+
+static int parse_ls_response(const char *response, uint32_t *mode,
+ struct strbuf *dataref)
+{
+ const char *tab;
+ const char *response_end;
+
+ assert(response);
+ response_end = response + strlen(response);
+
+ if (*response == 'm') { /* Missing. */
+ errno = ENOENT;
+ return -1;
+ }
+
+ /* Mode. */
+ if (response_end - response < strlen("100644") ||
+ response[strlen("100644")] != ' ')
+ die("invalid ls response: missing mode: %s", response);
+ *mode = 0;
+ for (; *response != ' '; response++) {
+ char ch = *response;
+ if (ch < '0' || ch > '7')
+ die("invalid ls response: mode is not octal: %s", response);
+ *mode *= 8;
+ *mode += ch - '0';
+ }
+
+ /* ' blob ' or ' tree ' */
+ if (response_end - response < strlen(" blob ") ||
+ (response[1] != 'b' && response[1] != 't'))
+ die("unexpected ls response: not a tree or blob: %s", response);
+ response += strlen(" blob ");
+
+ /* Dataref. */
+ tab = memchr(response, '\t', response_end - response);
+ if (!tab)
+ die("invalid ls response: missing tab: %s", response);
+ strbuf_add(dataref, response, tab - response);
+ return 0;
+}
+
+int fast_export_ls_rev(uint32_t rev, const char *path,
+ uint32_t *mode, struct strbuf *dataref)
+{
+ ls_from_rev(rev, path);
+ return parse_ls_response(get_response_line(), mode, dataref);
+}
+
+int fast_export_ls(const char *path, uint32_t *mode, struct strbuf *dataref)
+{
+ ls_from_active_commit(path);
+ return parse_ls_response(get_response_line(), mode, dataref);
+}
+
+void fast_export_blob_delta(uint32_t mode,
+ uint32_t old_mode, const char *old_data,
+ off_t len, struct line_buffer *input)
+{
+ long postimage_len;
+
+ assert(len >= 0);
+ postimage_len = apply_delta(len, input, old_data, old_mode);
+ if (mode == REPO_MODE_LNK) {
+ buffer_skip_bytes(&postimage, strlen("link "));
+ postimage_len -= strlen("link ");
+ }
+ printf("data %ld\n", postimage_len);
+ buffer_copy_bytes(&postimage, postimage_len);
+ fputc('\n', stdout);
+}
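For reference, the backchannel replies parsed above follow the fast-import report formats: a cat-blob request is answered with a "<sha1> blob <length>" header, then the contents and a trailing newline, while ls answers with "<mode> blob|tree <dataref>\t<path>" or a "missing" line. A simplified sketch of extracting the length from a cat-blob header (stand-alone illustration; parse_cat_response_line() above does much more validation):

	/* Example header: "<40-hex-oid> blob 11" */
	static off_t cat_blob_length(const char *header)
	{
		const char *p = strstr(header, " blob ");

		if (!p)
			die("unexpected cat-blob header: %s", header);
		return (off_t) strtoumax(p + strlen(" blob "), NULL, 10);
	}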
diff --git a/vcs-svn/fast_export.h b/vcs-svn/fast_export.h
index 33a8fe996f..aa629f54ff 100644
--- a/vcs-svn/fast_export.h
+++ b/vcs-svn/fast_export.h
@@ -1,16 +1,28 @@
#ifndef FAST_EXPORT_H_
#define FAST_EXPORT_H_
-#include "line_buffer.h"
struct strbuf;
+struct line_buffer;
-void fast_export_delete(uint32_t depth, uint32_t *path);
-void fast_export_modify(uint32_t depth, uint32_t *path, uint32_t mode,
- uint32_t mark);
-void fast_export_commit(uint32_t revision, const char *author,
+void fast_export_init(int fd);
+void fast_export_deinit(void);
+void fast_export_reset(void);
+
+void fast_export_delete(const char *path);
+void fast_export_modify(const char *path, uint32_t mode, const char *dataref);
+void fast_export_begin_commit(uint32_t revision, const char *author,
const struct strbuf *log, const char *uuid,
const char *url, unsigned long timestamp);
-void fast_export_blob(uint32_t mode, uint32_t mark, uint32_t len,
- struct line_buffer *input);
+void fast_export_end_commit(uint32_t revision);
+void fast_export_data(uint32_t mode, off_t len, struct line_buffer *input);
+void fast_export_blob_delta(uint32_t mode,
+ uint32_t old_mode, const char *old_data,
+ off_t len, struct line_buffer *input);
+
+/* If there is no such file at that rev, returns -1, errno == ENOENT. */
+int fast_export_ls_rev(uint32_t rev, const char *path,
+ uint32_t *mode_out, struct strbuf *dataref_out);
+int fast_export_ls(const char *path,
+ uint32_t *mode_out, struct strbuf *dataref_out);
#endif
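A short usage sketch for the ls helpers declared above, in the style of the new repo_tree.c further down; on a missing path they return -1 with errno set to ENOENT, so a caller can tell "not present" apart from a real failure (the paths here are invented):

	uint32_t mode;
	struct strbuf dataref = STRBUF_INIT;

	if (fast_export_ls("trunk/README", &mode, &dataref)) {
		if (errno != ENOENT)
			die_errno("fast_export_ls failed");
		/* no such path in the commit being built */
	} else {
		/* dataref.buf names the object, e.g. a mark like ":7" or a sha1 */
		fast_export_modify("branches/v1/README", mode, dataref.buf);
	}
	strbuf_release(&dataref);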
diff --git a/vcs-svn/line_buffer.c b/vcs-svn/line_buffer.c
index c39038723e..01fcb842f1 100644
--- a/vcs-svn/line_buffer.c
+++ b/vcs-svn/line_buffer.c
@@ -91,10 +91,10 @@ char *buffer_read_line(struct line_buffer *buf)
return buf->line_buffer;
}
-void buffer_read_binary(struct line_buffer *buf,
- struct strbuf *sb, uint32_t size)
+size_t buffer_read_binary(struct line_buffer *buf,
+ struct strbuf *sb, size_t size)
{
- strbuf_fread(sb, size, buf->infile);
+ return strbuf_fread(sb, size, buf->infile);
}
off_t buffer_copy_bytes(struct line_buffer *buf, off_t nbytes)
diff --git a/vcs-svn/line_buffer.h b/vcs-svn/line_buffer.h
index d0b22dda76..8901f214ba 100644
--- a/vcs-svn/line_buffer.h
+++ b/vcs-svn/line_buffer.h
@@ -23,7 +23,7 @@ long buffer_tmpfile_prepare_to_read(struct line_buffer *buf);
int buffer_ferror(struct line_buffer *buf);
char *buffer_read_line(struct line_buffer *buf);
int buffer_read_char(struct line_buffer *buf);
-void buffer_read_binary(struct line_buffer *buf, struct strbuf *sb, uint32_t len);
+size_t buffer_read_binary(struct line_buffer *buf, struct strbuf *sb, size_t len);
/* Returns number of bytes read (not necessarily written). */
off_t buffer_copy_bytes(struct line_buffer *buf, off_t len);
off_t buffer_skip_bytes(struct line_buffer *buf, off_t len);
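buffer_read_binary() now reports how many bytes it actually appended, so callers can detect short reads instead of assuming the requested amount arrived; the sliding-window code added below depends on exactly that. A tiny sketch (the file name is illustrative):

	struct line_buffer in = LINE_BUFFER_INIT;
	struct strbuf sb = STRBUF_INIT;

	if (buffer_init(&in, "dump.svn"))
		die_errno("cannot open input");
	if (buffer_read_binary(&in, &sb, 4096) != 4096 && buffer_ferror(&in))
		die_errno("cannot read input");
	/* otherwise a short read just means end of file */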
diff --git a/vcs-svn/obj_pool.h b/vcs-svn/obj_pool.h
deleted file mode 100644
index deb6eb8135..0000000000
--- a/vcs-svn/obj_pool.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed under a two-clause BSD-style license.
- * See LICENSE for details.
- */
-
-#ifndef OBJ_POOL_H_
-#define OBJ_POOL_H_
-
-#include "git-compat-util.h"
-
-#define MAYBE_UNUSED __attribute__((__unused__))
-
-#define obj_pool_gen(pre, obj_t, initial_capacity) \
-static struct { \
- uint32_t committed; \
- uint32_t size; \
- uint32_t capacity; \
- obj_t *base; \
-} pre##_pool = {0, 0, 0, NULL}; \
-static MAYBE_UNUSED uint32_t pre##_alloc(uint32_t count) \
-{ \
- uint32_t offset; \
- if (pre##_pool.size + count > pre##_pool.capacity) { \
- while (pre##_pool.size + count > pre##_pool.capacity) \
- if (pre##_pool.capacity) \
- pre##_pool.capacity *= 2; \
- else \
- pre##_pool.capacity = initial_capacity; \
- pre##_pool.base = realloc(pre##_pool.base, \
- pre##_pool.capacity * sizeof(obj_t)); \
- } \
- offset = pre##_pool.size; \
- pre##_pool.size += count; \
- return offset; \
-} \
-static MAYBE_UNUSED void pre##_free(uint32_t count) \
-{ \
- pre##_pool.size -= count; \
-} \
-static MAYBE_UNUSED uint32_t pre##_offset(obj_t *obj) \
-{ \
- return obj == NULL ? ~0 : obj - pre##_pool.base; \
-} \
-static MAYBE_UNUSED obj_t *pre##_pointer(uint32_t offset) \
-{ \
- return offset >= pre##_pool.size ? NULL : &pre##_pool.base[offset]; \
-} \
-static MAYBE_UNUSED void pre##_commit(void) \
-{ \
- pre##_pool.committed = pre##_pool.size; \
-} \
-static MAYBE_UNUSED void pre##_reset(void) \
-{ \
- free(pre##_pool.base); \
- pre##_pool.base = NULL; \
- pre##_pool.size = 0; \
- pre##_pool.capacity = 0; \
- pre##_pool.committed = 0; \
-}
-
-#endif
diff --git a/vcs-svn/repo_tree.c b/vcs-svn/repo_tree.c
index c3f198d29a..67d27f0b6c 100644
--- a/vcs-svn/repo_tree.c
+++ b/vcs-svn/repo_tree.c
@@ -4,323 +4,45 @@
*/
#include "git-compat-util.h"
-
-#include "string_pool.h"
+#include "strbuf.h"
#include "repo_tree.h"
-#include "obj_pool.h"
#include "fast_export.h"
-#include "trp.h"
-
-struct repo_dirent {
- uint32_t name_offset;
- struct trp_node children;
- uint32_t mode;
- uint32_t content_offset;
-};
-
-struct repo_dir {
- struct trp_root entries;
-};
-
-struct repo_commit {
- uint32_t root_dir_offset;
-};
-
-/* Memory pools for commit, dir and dirent */
-obj_pool_gen(commit, struct repo_commit, 4096)
-obj_pool_gen(dir, struct repo_dir, 4096)
-obj_pool_gen(dent, struct repo_dirent, 4096)
-
-static uint32_t active_commit;
-static uint32_t mark;
-
-static int repo_dirent_name_cmp(const void *a, const void *b);
-
-/* Treap for directory entries */
-trp_gen(static, dent_, struct repo_dirent, children, dent, repo_dirent_name_cmp)
-
-uint32_t next_blob_mark(void)
+const char *repo_read_path(const char *path, uint32_t *mode_out)
{
- return mark++;
-}
+ int err;
+ static struct strbuf buf = STRBUF_INIT;
-static struct repo_dir *repo_commit_root_dir(struct repo_commit *commit)
-{
- return dir_pointer(commit->root_dir_offset);
-}
-
-static struct repo_dirent *repo_first_dirent(struct repo_dir *dir)
-{
- return dent_first(&dir->entries);
-}
-
-static int repo_dirent_name_cmp(const void *a, const void *b)
-{
- const struct repo_dirent *dent1 = a, *dent2 = b;
- uint32_t a_offset = dent1->name_offset;
- uint32_t b_offset = dent2->name_offset;
- return (a_offset > b_offset) - (a_offset < b_offset);
-}
-
-static int repo_dirent_is_dir(struct repo_dirent *dent)
-{
- return dent != NULL && dent->mode == REPO_MODE_DIR;
-}
-
-static struct repo_dir *repo_dir_from_dirent(struct repo_dirent *dent)
-{
- if (!repo_dirent_is_dir(dent))
+ strbuf_reset(&buf);
+ err = fast_export_ls(path, mode_out, &buf);
+ if (err) {
+ if (errno != ENOENT)
+ die_errno("BUG: unexpected fast_export_ls error");
+ /* Treat missing paths as directories. */
+ *mode_out = REPO_MODE_DIR;
return NULL;
- return dir_pointer(dent->content_offset);
-}
-
-static struct repo_dir *repo_clone_dir(struct repo_dir *orig_dir)
-{
- uint32_t orig_o, new_o;
- orig_o = dir_offset(orig_dir);
- if (orig_o >= dir_pool.committed)
- return orig_dir;
- new_o = dir_alloc(1);
- orig_dir = dir_pointer(orig_o);
- *dir_pointer(new_o) = *orig_dir;
- return dir_pointer(new_o);
-}
-
-static struct repo_dirent *repo_read_dirent(uint32_t revision,
- const uint32_t *path)
-{
- uint32_t name = 0;
- struct repo_dirent *key = dent_pointer(dent_alloc(1));
- struct repo_dir *dir = NULL;
- struct repo_dirent *dent = NULL;
- dir = repo_commit_root_dir(commit_pointer(revision));
- while (~(name = *path++)) {
- key->name_offset = name;
- dent = dent_search(&dir->entries, key);
- if (dent == NULL || !repo_dirent_is_dir(dent))
- break;
- dir = repo_dir_from_dirent(dent);
}
- dent_free(1);
- return dent;
+ return buf.buf;
}
-static void repo_write_dirent(const uint32_t *path, uint32_t mode,
- uint32_t content_offset, uint32_t del)
+void repo_copy(uint32_t revision, const char *src, const char *dst)
{
- uint32_t name, revision, dir_o = ~0U, parent_dir_o = ~0U;
- struct repo_dir *dir;
- struct repo_dirent *key;
- struct repo_dirent *dent = NULL;
- revision = active_commit;
- dir = repo_commit_root_dir(commit_pointer(revision));
- dir = repo_clone_dir(dir);
- commit_pointer(revision)->root_dir_offset = dir_offset(dir);
- while (~(name = *path++)) {
- parent_dir_o = dir_offset(dir);
-
- key = dent_pointer(dent_alloc(1));
- key->name_offset = name;
-
- dent = dent_search(&dir->entries, key);
- if (dent == NULL)
- dent = key;
- else
- dent_free(1);
-
- if (dent == key) {
- dent->mode = REPO_MODE_DIR;
- dent->content_offset = 0;
- dent = dent_insert(&dir->entries, dent);
- }
-
- if (dent_offset(dent) < dent_pool.committed) {
- dir_o = repo_dirent_is_dir(dent) ?
- dent->content_offset : ~0;
- dent_remove(&dir->entries, dent);
- dent = dent_pointer(dent_alloc(1));
- dent->name_offset = name;
- dent->mode = REPO_MODE_DIR;
- dent->content_offset = dir_o;
- dent = dent_insert(&dir->entries, dent);
- }
-
- dir = repo_dir_from_dirent(dent);
- dir = repo_clone_dir(dir);
- dent->content_offset = dir_offset(dir);
- }
- if (dent == NULL)
+ int err;
+ uint32_t mode;
+ static struct strbuf data = STRBUF_INIT;
+
+ strbuf_reset(&data);
+ err = fast_export_ls_rev(revision, src, &mode, &data);
+ if (err) {
+ if (errno != ENOENT)
+ die_errno("BUG: unexpected fast_export_ls_rev error");
+ fast_export_delete(dst);
return;
- dent->mode = mode;
- dent->content_offset = content_offset;
- if (del && ~parent_dir_o)
- dent_remove(&dir_pointer(parent_dir_o)->entries, dent);
-}
-
-uint32_t repo_read_path(const uint32_t *path)
-{
- uint32_t content_offset = 0;
- struct repo_dirent *dent = repo_read_dirent(active_commit, path);
- if (dent != NULL)
- content_offset = dent->content_offset;
- return content_offset;
-}
-
-uint32_t repo_read_mode(const uint32_t *path)
-{
- struct repo_dirent *dent = repo_read_dirent(active_commit, path);
- if (dent == NULL)
- die("invalid dump: path to be modified is missing");
- return dent->mode;
-}
-
-void repo_copy(uint32_t revision, const uint32_t *src, const uint32_t *dst)
-{
- uint32_t mode = 0, content_offset = 0;
- struct repo_dirent *src_dent;
- src_dent = repo_read_dirent(revision, src);
- if (src_dent != NULL) {
- mode = src_dent->mode;
- content_offset = src_dent->content_offset;
- repo_write_dirent(dst, mode, content_offset, 0);
- }
-}
-
-void repo_add(uint32_t *path, uint32_t mode, uint32_t blob_mark)
-{
- repo_write_dirent(path, mode, blob_mark, 0);
-}
-
-void repo_delete(uint32_t *path)
-{
- repo_write_dirent(path, 0, 0, 1);
-}
-
-static void repo_git_add_r(uint32_t depth, uint32_t *path, struct repo_dir *dir);
-
-static void repo_git_add(uint32_t depth, uint32_t *path, struct repo_dirent *dent)
-{
- if (repo_dirent_is_dir(dent))
- repo_git_add_r(depth, path, repo_dir_from_dirent(dent));
- else
- fast_export_modify(depth, path,
- dent->mode, dent->content_offset);
-}
-
-static void repo_git_add_r(uint32_t depth, uint32_t *path, struct repo_dir *dir)
-{
- struct repo_dirent *de = repo_first_dirent(dir);
- while (de) {
- path[depth] = de->name_offset;
- repo_git_add(depth + 1, path, de);
- de = dent_next(&dir->entries, de);
- }
-}
-
-static void repo_diff_r(uint32_t depth, uint32_t *path, struct repo_dir *dir1,
- struct repo_dir *dir2)
-{
- struct repo_dirent *de1, *de2;
- de1 = repo_first_dirent(dir1);
- de2 = repo_first_dirent(dir2);
-
- while (de1 && de2) {
- if (de1->name_offset < de2->name_offset) {
- path[depth] = de1->name_offset;
- fast_export_delete(depth + 1, path);
- de1 = dent_next(&dir1->entries, de1);
- continue;
- }
- if (de1->name_offset > de2->name_offset) {
- path[depth] = de2->name_offset;
- repo_git_add(depth + 1, path, de2);
- de2 = dent_next(&dir2->entries, de2);
- continue;
- }
- path[depth] = de1->name_offset;
-
- if (de1->mode == de2->mode &&
- de1->content_offset == de2->content_offset) {
- ; /* No change. */
- } else if (repo_dirent_is_dir(de1) && repo_dirent_is_dir(de2)) {
- repo_diff_r(depth + 1, path,
- repo_dir_from_dirent(de1),
- repo_dir_from_dirent(de2));
- } else if (!repo_dirent_is_dir(de1) && !repo_dirent_is_dir(de2)) {
- repo_git_add(depth + 1, path, de2);
- } else {
- fast_export_delete(depth + 1, path);
- repo_git_add(depth + 1, path, de2);
- }
- de1 = dent_next(&dir1->entries, de1);
- de2 = dent_next(&dir2->entries, de2);
- }
- while (de1) {
- path[depth] = de1->name_offset;
- fast_export_delete(depth + 1, path);
- de1 = dent_next(&dir1->entries, de1);
- }
- while (de2) {
- path[depth] = de2->name_offset;
- repo_git_add(depth + 1, path, de2);
- de2 = dent_next(&dir2->entries, de2);
- }
-}
-
-static uint32_t path_stack[REPO_MAX_PATH_DEPTH];
-
-void repo_diff(uint32_t r1, uint32_t r2)
-{
- repo_diff_r(0,
- path_stack,
- repo_commit_root_dir(commit_pointer(r1)),
- repo_commit_root_dir(commit_pointer(r2)));
-}
-
-void repo_commit(uint32_t revision, const char *author,
- const struct strbuf *log, const char *uuid, const char *url,
- unsigned long timestamp)
-{
- fast_export_commit(revision, author, log, uuid, url, timestamp);
- dent_commit();
- dir_commit();
- active_commit = commit_alloc(1);
- commit_pointer(active_commit)->root_dir_offset =
- commit_pointer(active_commit - 1)->root_dir_offset;
-}
-
-static void mark_init(void)
-{
- uint32_t i;
- mark = 0;
- for (i = 0; i < dent_pool.size; i++)
- if (!repo_dirent_is_dir(dent_pointer(i)) &&
- dent_pointer(i)->content_offset > mark)
- mark = dent_pointer(i)->content_offset;
- mark++;
-}
-
-void repo_init(void)
-{
- mark_init();
- if (commit_pool.size == 0) {
- /* Create empty tree for commit 0. */
- commit_alloc(1);
- commit_pointer(0)->root_dir_offset = dir_alloc(1);
- dir_pointer(0)->entries.trp_root = ~0;
- dir_commit();
}
- /* Preallocate next commit, ready for changes. */
- active_commit = commit_alloc(1);
- commit_pointer(active_commit)->root_dir_offset =
- commit_pointer(active_commit - 1)->root_dir_offset;
+ fast_export_modify(dst, mode, data.buf);
}
-void repo_reset(void)
+void repo_delete(const char *path)
{
- pool_reset();
- commit_reset();
- dir_reset();
- dent_reset();
+ fast_export_delete(path);
}
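With the treap and object pools gone, repo_tree is now a thin shim over the fast-import backchannel: repo_read_path() queries the commit being built and repo_copy() queries an older revision by its mark, falling back to a delete when the source is missing. A hedged sketch of how the svndump caller might use it (the paths are invented):

	uint32_t mode;
	const char *dataref;

	/* Replay "svn cp trunk branches/foo" as of revision 3. */
	repo_copy(3, "trunk", "branches/foo");

	/* See what the active commit currently has at a path. */
	dataref = repo_read_path("branches/foo/README", &mode);
	if (!dataref)
		; /* missing; repo_read_path reports such paths as directories */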
diff --git a/vcs-svn/repo_tree.h b/vcs-svn/repo_tree.h
index 37bde2e374..889c6a3c95 100644
--- a/vcs-svn/repo_tree.h
+++ b/vcs-svn/repo_tree.h
@@ -8,15 +8,11 @@ struct strbuf;
#define REPO_MODE_EXE 0100755
#define REPO_MODE_LNK 0120000
-#define REPO_MAX_PATH_LEN 4096
-#define REPO_MAX_PATH_DEPTH 1000
-
uint32_t next_blob_mark(void);
-void repo_copy(uint32_t revision, const uint32_t *src, const uint32_t *dst);
-void repo_add(uint32_t *path, uint32_t mode, uint32_t blob_mark);
-uint32_t repo_read_path(const uint32_t *path);
-uint32_t repo_read_mode(const uint32_t *path);
-void repo_delete(uint32_t *path);
+void repo_copy(uint32_t revision, const char *src, const char *dst);
+void repo_add(const char *path, uint32_t mode, uint32_t blob_mark);
+const char *repo_read_path(const char *path, uint32_t *mode_out);
+void repo_delete(const char *path);
void repo_commit(uint32_t revision, const char *author,
const struct strbuf *log, const char *uuid, const char *url,
long unsigned timestamp);
diff --git a/vcs-svn/sliding_window.c b/vcs-svn/sliding_window.c
new file mode 100644
index 0000000000..ec2707c9c4
--- /dev/null
+++ b/vcs-svn/sliding_window.c
@@ -0,0 +1,79 @@
+/*
+ * Licensed under a two-clause BSD-style license.
+ * See LICENSE for details.
+ */
+
+#include "git-compat-util.h"
+#include "sliding_window.h"
+#include "line_buffer.h"
+#include "strbuf.h"
+
+static int input_error(struct line_buffer *file)
+{
+ if (!buffer_ferror(file))
+ return error("delta preimage ends early");
+ return error("cannot read delta preimage: %s", strerror(errno));
+}
+
+static int skip_or_whine(struct line_buffer *file, off_t gap)
+{
+ if (buffer_skip_bytes(file, gap) != gap)
+ return input_error(file);
+ return 0;
+}
+
+static int read_to_fill_or_whine(struct line_buffer *file,
+ struct strbuf *buf, size_t width)
+{
+ buffer_read_binary(file, buf, width - buf->len);
+ if (buf->len != width)
+ return input_error(file);
+ return 0;
+}
+
+static int check_offset_overflow(off_t offset, uintmax_t len)
+{
+ if (len > maximum_signed_value_of_type(off_t))
+ return error("unrepresentable length in delta: "
+ "%"PRIuMAX" > OFF_MAX", len);
+ if (signed_add_overflows(offset, (off_t) len))
+ return error("unrepresentable offset in delta: "
+ "%"PRIuMAX" + %"PRIuMAX" > OFF_MAX",
+ (uintmax_t) offset, len);
+ return 0;
+}
+
+int move_window(struct sliding_view *view, off_t off, size_t width)
+{
+ off_t file_offset;
+ assert(view);
+ assert(view->width <= view->buf.len);
+ assert(!check_offset_overflow(view->off, view->buf.len));
+
+ if (check_offset_overflow(off, width))
+ return -1;
+ if (off < view->off || off + width < view->off + view->width)
+ return error("invalid delta: window slides left");
+ if (view->max_off >= 0 && view->max_off < off + width)
+ return error("delta preimage ends early");
+
+ file_offset = view->off + view->buf.len;
+ if (off < file_offset) {
+ /* Move the overlapping region into place. */
+ strbuf_remove(&view->buf, 0, off - view->off);
+ } else {
+ /* Seek ahead to skip the gap. */
+ if (skip_or_whine(view->file, off - file_offset))
+ return -1;
+ strbuf_setlen(&view->buf, 0);
+ }
+
+ if (view->buf.len > width)
+ ; /* Already read. */
+ else if (read_to_fill_or_whine(view->file, &view->buf, width))
+ return -1;
+
+ view->off = off;
+ view->width = width;
+ return 0;
+}
diff --git a/vcs-svn/sliding_window.h b/vcs-svn/sliding_window.h
new file mode 100644
index 0000000000..b43a825cba
--- /dev/null
+++ b/vcs-svn/sliding_window.h
@@ -0,0 +1,18 @@
+#ifndef SLIDING_WINDOW_H_
+#define SLIDING_WINDOW_H_
+
+#include "strbuf.h"
+
+struct sliding_view {
+ struct line_buffer *file;
+ off_t off;
+ size_t width;
+ off_t max_off; /* -1 means unlimited */
+ struct strbuf buf;
+};
+
+#define SLIDING_VIEW_INIT(input, len) { (input), 0, 0, (len), STRBUF_INIT }
+
+extern int move_window(struct sliding_view *view, off_t off, size_t width);
+
+#endif
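A minimal sketch of driving the sliding view declared above: the window can only move forward through the underlying line_buffer, and max_off (when not -1) caps how far it may read. This is how both test-svn-fe and the delta applier expose the preimage to svndiff0_apply() (the file name is illustrative):

	struct line_buffer preimage = LINE_BUFFER_INIT;
	struct sliding_view view = SLIDING_VIEW_INIT(&preimage, -1);

	if (buffer_init(&preimage, "preimage.bin"))
		die_errno("cannot open preimage");
	/* Expose bytes [100, 164) of the input; earlier offsets become unreachable. */
	if (move_window(&view, 100, 64))
		die("cannot position preimage window");
	/* view.buf now holds that 64-byte window for copy instructions to read. */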
diff --git a/vcs-svn/string_pool.c b/vcs-svn/string_pool.c
deleted file mode 100644
index 1b63b19a38..0000000000
--- a/vcs-svn/string_pool.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed under a two-clause BSD-style license.
- * See LICENSE for details.
- */
-
-#include "git-compat-util.h"
-#include "trp.h"
-#include "obj_pool.h"
-#include "string_pool.h"
-
-static struct trp_root tree = { ~0U };
-
-struct node {
- uint32_t offset;
- struct trp_node children;
-};
-
-/* Two memory pools: one for struct node, and another for strings */
-obj_pool_gen(node, struct node, 4096)
-obj_pool_gen(string, char, 4096)
-
-static char *node_value(struct node *node)
-{
- return node ? string_pointer(node->offset) : NULL;
-}
-
-static int node_cmp(struct node *a, struct node *b)
-{
- return strcmp(node_value(a), node_value(b));
-}
-
-/* Build a Treap from the node structure (a trp_node w/ offset) */
-trp_gen(static, tree_, struct node, children, node, node_cmp)
-
-const char *pool_fetch(uint32_t entry)
-{
- return node_value(node_pointer(entry));
-}
-
-uint32_t pool_intern(const char *key)
-{
- /* Canonicalize key */
- struct node *match = NULL, *node;
- uint32_t key_len;
- if (key == NULL)
- return ~0;
- key_len = strlen(key) + 1;
- node = node_pointer(node_alloc(1));
- node->offset = string_alloc(key_len);
- strcpy(node_value(node), key);
- match = tree_search(&tree, node);
- if (!match) {
- tree_insert(&tree, node);
- } else {
- node_free(1);
- string_free(key_len);
- node = match;
- }
- return node_offset(node);
-}
-
-uint32_t pool_tok_r(char *str, const char *delim, char **saveptr)
-{
- char *token = strtok_r(str, delim, saveptr);
- return token ? pool_intern(token) : ~0;
-}
-
-void pool_print_seq(uint32_t len, uint32_t *seq, char delim, FILE *stream)
-{
- uint32_t i;
- for (i = 0; i < len && ~seq[i]; i++) {
- fputs(pool_fetch(seq[i]), stream);
- if (i < len - 1 && ~seq[i + 1])
- fputc(delim, stream);
- }
-}
-
-uint32_t pool_tok_seq(uint32_t sz, uint32_t *seq, const char *delim, char *str)
-{
- char *context = NULL;
- uint32_t token = ~0U;
- uint32_t length;
-
- if (sz == 0)
- return ~0;
- if (str)
- token = pool_tok_r(str, delim, &context);
- for (length = 0; length < sz; length++) {
- seq[length] = token;
- if (token == ~0)
- return length;
- token = pool_tok_r(NULL, delim, &context);
- }
- seq[sz - 1] = ~0;
- return sz;
-}
-
-void pool_reset(void)
-{
- node_reset();
- string_reset();
-}
diff --git a/vcs-svn/string_pool.h b/vcs-svn/string_pool.h
deleted file mode 100644
index 222fb66e68..0000000000
--- a/vcs-svn/string_pool.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef STRING_POOL_H_
-#define STRING_POOL_H_
-
-uint32_t pool_intern(const char *key);
-const char *pool_fetch(uint32_t entry);
-uint32_t pool_tok_r(char *str, const char *delim, char **saveptr);
-void pool_print_seq(uint32_t len, uint32_t *seq, char delim, FILE *stream);
-uint32_t pool_tok_seq(uint32_t sz, uint32_t *seq, const char *delim, char *str);
-void pool_reset(void);
-
-#endif
diff --git a/vcs-svn/string_pool.txt b/vcs-svn/string_pool.txt
deleted file mode 100644
index 1b41f15628..0000000000
--- a/vcs-svn/string_pool.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-string_pool API
-===============
-
-The string_pool API provides facilities for replacing strings
-with integer keys that can be more easily compared and stored.
-The facilities are designed so that one could teach Git without
-too much trouble to store the information needed for these keys to
-remain valid over multiple executions.
-
-Functions
----------
-
-pool_intern::
- Include a string in the string pool and get its key.
- If that string is already in the pool, retrieves its
- existing key.
-
-pool_fetch::
- Retrieve the string associated to a given key.
-
-pool_tok_r::
- Extract the key of the next token from a string.
- Interface mimics strtok_r.
-
-pool_print_seq::
- Print a sequence of strings named by key to a file, using the
- specified delimiter to separate them.
-
- If NULL (key ~0) appears in the sequence, the sequence ends
- early.
-
-pool_tok_seq::
- Split a string into tokens, storing the keys of segments
- into a caller-provided array.
-
- Unless sz is 0, the array will always be ~0-terminated.
- If there is not enough room for all the tokens, the
- array holds as many tokens as fit in the entries before
- the terminating ~0. Return value is the index after the
- last token, or sz if the tokens did not fit.
-
-pool_reset::
- Deallocate storage for the string pool.
diff --git a/vcs-svn/svndiff.c b/vcs-svn/svndiff.c
new file mode 100644
index 0000000000..1647c1a780
--- /dev/null
+++ b/vcs-svn/svndiff.c
@@ -0,0 +1,308 @@
+/*
+ * Licensed under a two-clause BSD-style license.
+ * See LICENSE for details.
+ */
+
+#include "git-compat-util.h"
+#include "sliding_window.h"
+#include "line_buffer.h"
+#include "svndiff.h"
+
+/*
+ * svndiff0 applier
+ *
+ * See http://svn.apache.org/repos/asf/subversion/trunk/notes/svndiff.
+ *
+ * svndiff0 ::= 'SVN\0' window*
+ * window ::= int int int int int instructions inline_data;
+ * instructions ::= instruction*;
+ * instruction ::= view_selector int int
+ * | copyfrom_data int
+ * | packed_view_selector int
+ * | packed_copyfrom_data
+ * ;
+ * view_selector ::= copyfrom_source
+ * | copyfrom_target
+ * ;
+ * copyfrom_source ::= # binary 00 000000;
+ * copyfrom_target ::= # binary 01 000000;
+ * copyfrom_data ::= # binary 10 000000;
+ * packed_view_selector ::= # view_selector OR-ed with 6 bit value;
+ * packed_copyfrom_data ::= # copyfrom_data OR-ed with 6 bit value;
+ * int ::= highdigit* lowdigit;
+ * highdigit ::= # binary 1000 0000 OR-ed with 7 bit value;
+ * lowdigit ::= # 7 bit value;
+ */
+
+#define INSN_MASK 0xc0
+#define INSN_COPYFROM_SOURCE 0x00
+#define INSN_COPYFROM_TARGET 0x40
+#define INSN_COPYFROM_DATA 0x80
+#define OPERAND_MASK 0x3f
+
+#define VLI_CONTINUE 0x80
+#define VLI_DIGIT_MASK 0x7f
+#define VLI_BITS_PER_DIGIT 7
+
+struct window {
+ struct sliding_view *in;
+ struct strbuf out;
+ struct strbuf instructions;
+ struct strbuf data;
+};
+
+#define WINDOW_INIT(w) { (w), STRBUF_INIT, STRBUF_INIT, STRBUF_INIT }
+
+static void window_release(struct window *ctx)
+{
+ strbuf_release(&ctx->out);
+ strbuf_release(&ctx->instructions);
+ strbuf_release(&ctx->data);
+}
+
+static int write_strbuf(struct strbuf *sb, FILE *out)
+{
+ if (fwrite(sb->buf, 1, sb->len, out) == sb->len) /* Success. */
+ return 0;
+ return error("cannot write delta postimage: %s", strerror(errno));
+}
+
+static int error_short_read(struct line_buffer *input)
+{
+ if (buffer_ferror(input))
+ return error("error reading delta: %s", strerror(errno));
+ return error("invalid delta: unexpected end of file");
+}
+
+static int read_chunk(struct line_buffer *delta, off_t *delta_len,
+ struct strbuf *buf, size_t len)
+{
+ strbuf_reset(buf);
+ if (len > *delta_len ||
+ buffer_read_binary(delta, buf, len) != len)
+ return error_short_read(delta);
+ *delta_len -= buf->len;
+ return 0;
+}
+
+static int read_magic(struct line_buffer *in, off_t *len)
+{
+ static const char magic[] = {'S', 'V', 'N', '\0'};
+ struct strbuf sb = STRBUF_INIT;
+
+ if (read_chunk(in, len, &sb, sizeof(magic))) {
+ strbuf_release(&sb);
+ return -1;
+ }
+ if (memcmp(sb.buf, magic, sizeof(magic))) {
+ strbuf_release(&sb);
+ return error("invalid delta: unrecognized file type");
+ }
+ strbuf_release(&sb);
+ return 0;
+}
+
+static int read_int(struct line_buffer *in, uintmax_t *result, off_t *len)
+{
+ uintmax_t rv = 0;
+ off_t sz;
+ for (sz = *len; sz; sz--) {
+ const int ch = buffer_read_char(in);
+ if (ch == EOF)
+ break;
+
+ rv <<= VLI_BITS_PER_DIGIT;
+ rv += (ch & VLI_DIGIT_MASK);
+ if (ch & VLI_CONTINUE)
+ continue;
+
+ *result = rv;
+ *len = sz - 1;
+ return 0;
+ }
+ return error_short_read(in);
+}
+
+static int parse_int(const char **buf, size_t *result, const char *end)
+{
+ size_t rv = 0;
+ const char *pos;
+ for (pos = *buf; pos != end; pos++) {
+ unsigned char ch = *pos;
+
+ rv <<= VLI_BITS_PER_DIGIT;
+ rv += (ch & VLI_DIGIT_MASK);
+ if (ch & VLI_CONTINUE)
+ continue;
+
+ *result = rv;
+ *buf = pos + 1;
+ return 0;
+ }
+ return error("invalid delta: unexpected end of instructions section");
+}
+
+static int read_offset(struct line_buffer *in, off_t *result, off_t *len)
+{
+ uintmax_t val;
+ if (read_int(in, &val, len))
+ return -1;
+ if (val > maximum_signed_value_of_type(off_t))
+ return error("unrepresentable offset in delta: %"PRIuMAX"", val);
+ *result = val;
+ return 0;
+}
+
+static int read_length(struct line_buffer *in, size_t *result, off_t *len)
+{
+ uintmax_t val;
+ if (read_int(in, &val, len))
+ return -1;
+ if (val > SIZE_MAX)
+ return error("unrepresentable length in delta: %"PRIuMAX"", val);
+ *result = val;
+ return 0;
+}
+
+static int copyfrom_source(struct window *ctx, const char **instructions,
+ size_t nbytes, const char *insns_end)
+{
+ size_t offset;
+ if (parse_int(instructions, &offset, insns_end))
+ return -1;
+ if (unsigned_add_overflows(offset, nbytes) ||
+ offset + nbytes > ctx->in->width)
+ return error("invalid delta: copies source data outside view");
+ strbuf_add(&ctx->out, ctx->in->buf.buf + offset, nbytes);
+ return 0;
+}
+
+static int copyfrom_target(struct window *ctx, const char **instructions,
+ size_t nbytes, const char *instructions_end)
+{
+ size_t offset;
+ if (parse_int(instructions, &offset, instructions_end))
+ return -1;
+ if (offset >= ctx->out.len)
+ return error("invalid delta: copies from the future");
+ for (; nbytes > 0; nbytes--)
+ strbuf_addch(&ctx->out, ctx->out.buf[offset++]);
+ return 0;
+}
+
+static int copyfrom_data(struct window *ctx, size_t *data_pos, size_t nbytes)
+{
+ const size_t pos = *data_pos;
+ if (unsigned_add_overflows(pos, nbytes) ||
+ pos + nbytes > ctx->data.len)
+ return error("invalid delta: copies unavailable inline data");
+ strbuf_add(&ctx->out, ctx->data.buf + pos, nbytes);
+ *data_pos += nbytes;
+ return 0;
+}
+
+static int parse_first_operand(const char **buf, size_t *out, const char *end)
+{
+ size_t result = (unsigned char) *(*buf)++ & OPERAND_MASK;
+ if (result) { /* immediate operand */
+ *out = result;
+ return 0;
+ }
+ return parse_int(buf, out, end);
+}
+
+static int execute_one_instruction(struct window *ctx,
+ const char **instructions, size_t *data_pos)
+{
+ unsigned int instruction;
+ const char *insns_end = ctx->instructions.buf + ctx->instructions.len;
+ size_t nbytes;
+ assert(ctx);
+ assert(instructions && *instructions);
+ assert(data_pos);
+
+ instruction = (unsigned char) **instructions;
+ if (parse_first_operand(instructions, &nbytes, insns_end))
+ return -1;
+ switch (instruction & INSN_MASK) {
+ case INSN_COPYFROM_SOURCE:
+ return copyfrom_source(ctx, instructions, nbytes, insns_end);
+ case INSN_COPYFROM_TARGET:
+ return copyfrom_target(ctx, instructions, nbytes, insns_end);
+ case INSN_COPYFROM_DATA:
+ return copyfrom_data(ctx, data_pos, nbytes);
+ default:
+ return error("invalid delta: unrecognized instruction");
+ }
+}
+
+static int apply_window_in_core(struct window *ctx)
+{
+ const char *instructions;
+ size_t data_pos = 0;
+
+ /*
+ * Fill ctx->out.buf using data from the source, target,
+ * and inline data views.
+ */
+ for (instructions = ctx->instructions.buf;
+ instructions != ctx->instructions.buf + ctx->instructions.len;
+ )
+ if (execute_one_instruction(ctx, &instructions, &data_pos))
+ return -1;
+ if (data_pos != ctx->data.len)
+ return error("invalid delta: does not copy all inline data");
+ return 0;
+}
+
+static int apply_one_window(struct line_buffer *delta, off_t *delta_len,
+ struct sliding_view *preimage, FILE *out)
+{
+ struct window ctx = WINDOW_INIT(preimage);
+ size_t out_len;
+ size_t instructions_len;
+ size_t data_len;
+ assert(delta_len);
+
+ /* "source view" offset and length already handled; */
+ if (read_length(delta, &out_len, delta_len) ||
+ read_length(delta, &instructions_len, delta_len) ||
+ read_length(delta, &data_len, delta_len) ||
+ read_chunk(delta, delta_len, &ctx.instructions, instructions_len) ||
+ read_chunk(delta, delta_len, &ctx.data, data_len))
+ goto error_out;
+ strbuf_grow(&ctx.out, out_len);
+ if (apply_window_in_core(&ctx))
+ goto error_out;
+ if (ctx.out.len != out_len) {
+ error("invalid delta: incorrect postimage length");
+ goto error_out;
+ }
+ if (write_strbuf(&ctx.out, out))
+ goto error_out;
+ window_release(&ctx);
+ return 0;
+error_out:
+ window_release(&ctx);
+ return -1;
+}
+
+int svndiff0_apply(struct line_buffer *delta, off_t delta_len,
+ struct sliding_view *preimage, FILE *postimage)
+{
+ assert(delta && preimage && postimage);
+
+ if (read_magic(delta, &delta_len))
+ return -1;
+ while (delta_len) { /* For each window: */
+ off_t pre_off = pre_off; /* stupid GCC... */
+ size_t pre_len;
+
+ if (read_offset(delta, &pre_off, &delta_len) ||
+ read_length(delta, &pre_len, &delta_len) ||
+ move_window(preimage, pre_off, pre_len) ||
+ apply_one_window(delta, &delta_len, preimage, postimage))
+ return -1;
+ }
+ return 0;
+}
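
Note: every integer in the svndiff0 grammar above ("int ::= highdigit* lowdigit") is a big-endian base-128 quantity with the high bit set on every digit except the last, which is exactly what read_int() and parse_int() decode. A standalone round-trip sketch of that encoding, assuming nothing beyond the grammar comment (the helper names are illustrative):

----
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Append the encoding of val to buf; returns the number of bytes written. */
static size_t vli_encode(uintmax_t val, unsigned char *buf)
{
	unsigned char tmp[sizeof(val) * 8 / 7 + 1];
	size_t n = 0, i;

	do {
		tmp[n++] = val & 0x7f;	/* collect low-order digits first */
		val >>= 7;
	} while (val);
	for (i = 0; i < n; i++)		/* emit big-endian, continuation bit on all but the last */
		buf[i] = tmp[n - 1 - i] | (i + 1 < n ? 0x80 : 0);
	return n;
}

/* Decode one integer from buf[0..len); returns bytes consumed, 0 on error. */
static size_t vli_decode(const unsigned char *buf, size_t len, uintmax_t *out)
{
	uintmax_t rv = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		rv = (rv << 7) | (buf[i] & 0x7f);
		if (!(buf[i] & 0x80)) {
			*out = rv;
			return i + 1;
		}
	}
	return 0;	/* ran out of input mid-integer */
}

int main(void)
{
	unsigned char buf[16];
	uintmax_t v;
	size_t n = vli_encode(130, buf);	/* encodes as 0x81 0x02 */
	size_t m = vli_decode(buf, n, &v);
	printf("%zu bytes -> %ju (read %zu)\n", n, v, m);
	return 0;
}
----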
diff --git a/vcs-svn/svndiff.h b/vcs-svn/svndiff.h
new file mode 100644
index 0000000000..74eb464bab
--- /dev/null
+++ b/vcs-svn/svndiff.h
@@ -0,0 +1,10 @@
+#ifndef SVNDIFF_H_
+#define SVNDIFF_H_
+
+struct line_buffer;
+struct sliding_view;
+
+extern int svndiff0_apply(struct line_buffer *delta, off_t delta_len,
+ struct sliding_view *preimage, FILE *postimage);
+
+#endif
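
Note: a caller wires these pieces together by opening the preimage through a line_buffer, wrapping it in a sliding_view, and handing both to svndiff0_apply(), which streams the postimage to the given FILE. The sketch below assumes it is built inside the git tree and linked with the vcs-svn objects (test-svn-fe.c exercises the same API); the file names are placeholders.

----
#include "git-compat-util.h"
#include "line_buffer.h"
#include "sliding_window.h"
#include "svndiff.h"

static struct line_buffer preimage = LINE_BUFFER_INIT;
static struct line_buffer delta = LINE_BUFFER_INIT;

int main(void)
{
	struct sliding_view preimage_view = SLIDING_VIEW_INIT(&preimage, -1);
	struct stat st;

	if (buffer_init(&preimage, "preimage.dat") ||
	    buffer_init(&delta, "postimage.svndiff0"))
		die_errno("cannot open input");
	if (stat("postimage.svndiff0", &st))
		die_errno("cannot stat delta");

	/* Decode the delta against the preimage; the postimage goes to stdout. */
	if (svndiff0_apply(&delta, st.st_size, &preimage_view, stdout))
		return 1;

	strbuf_release(&preimage_view.buf);
	return buffer_deinit(&preimage) || buffer_deinit(&delta);
}
----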
diff --git a/vcs-svn/svndump.c b/vcs-svn/svndump.c
index bc792223b2..644fdc71ba 100644
--- a/vcs-svn/svndump.c
+++ b/vcs-svn/svndump.c
@@ -11,7 +11,6 @@
#include "repo_tree.h"
#include "fast_export.h"
#include "line_buffer.h"
-#include "string_pool.h"
#include "strbuf.h"
#include "svndump.h"
@@ -21,15 +20,19 @@
*/
#define constcmp(s, ref) memcmp(s, ref, sizeof(ref) - 1)
+#define REPORT_FILENO 3
+
#define NODEACT_REPLACE 4
#define NODEACT_DELETE 3
#define NODEACT_ADD 2
#define NODEACT_CHANGE 1
#define NODEACT_UNKNOWN 0
-#define DUMP_CTX 0
-#define REV_CTX 1
-#define NODE_CTX 2
+/* States: */
+#define DUMP_CTX 0 /* dump metadata */
+#define REV_CTX 1 /* revision metadata */
+#define NODE_CTX 2 /* node metadata */
+#define INTERNODE_CTX 3 /* between nodes */
#define LENGTH_UNKNOWN (~0)
#define DATE_RFC2822_LEN 31
@@ -37,8 +40,9 @@
static struct line_buffer input = LINE_BUFFER_INIT;
static struct {
- uint32_t action, propLength, textLength, srcRev, type;
- uint32_t src[REPO_MAX_PATH_DEPTH], dst[REPO_MAX_PATH_DEPTH];
+ uint32_t action, propLength, srcRev, type;
+ off_t text_length;
+ struct strbuf src, dst;
uint32_t text_delta, prop_delta;
} node_ctx;
@@ -58,10 +62,12 @@ static void reset_node_ctx(char *fname)
node_ctx.type = 0;
node_ctx.action = NODEACT_UNKNOWN;
node_ctx.propLength = LENGTH_UNKNOWN;
- node_ctx.textLength = LENGTH_UNKNOWN;
- node_ctx.src[0] = ~0;
+ node_ctx.text_length = -1;
+ strbuf_reset(&node_ctx.src);
node_ctx.srcRev = 0;
- pool_tok_seq(REPO_MAX_PATH_DEPTH, node_ctx.dst, "/", fname);
+ strbuf_reset(&node_ctx.dst);
+ if (fname)
+ strbuf_addstr(&node_ctx.dst, fname);
node_ctx.text_delta = 0;
node_ctx.prop_delta = 0;
}
@@ -202,28 +208,32 @@ static void read_props(void)
static void handle_node(void)
{
- uint32_t mark = 0;
const uint32_t type = node_ctx.type;
const int have_props = node_ctx.propLength != LENGTH_UNKNOWN;
- const int have_text = node_ctx.textLength != LENGTH_UNKNOWN;
+ const int have_text = node_ctx.text_length != -1;
+ /*
+ * Old text for this node:
+ * NULL - directory or bug
+ * empty_blob - empty
+ * "<dataref>" - data retrievable from fast-import
+ */
+ static const char *const empty_blob = "::empty::";
+ const char *old_data = NULL;
+ uint32_t old_mode = REPO_MODE_BLB;
- if (node_ctx.text_delta)
- die("text deltas not supported");
- if (have_text)
- mark = next_blob_mark();
if (node_ctx.action == NODEACT_DELETE) {
if (have_text || have_props || node_ctx.srcRev)
die("invalid dump: deletion node has "
"copyfrom info, text, or properties");
- repo_delete(node_ctx.dst);
+ repo_delete(node_ctx.dst.buf);
return;
}
if (node_ctx.action == NODEACT_REPLACE) {
- repo_delete(node_ctx.dst);
+ repo_delete(node_ctx.dst.buf);
node_ctx.action = NODEACT_ADD;
}
if (node_ctx.srcRev) {
- repo_copy(node_ctx.srcRev, node_ctx.src, node_ctx.dst);
+ repo_copy(node_ctx.srcRev, node_ctx.src.buf, node_ctx.dst.buf);
if (node_ctx.action == NODEACT_ADD)
node_ctx.action = NODEACT_CHANGE;
}
@@ -231,23 +241,27 @@ static void handle_node(void)
die("invalid dump: directories cannot have text attached");
/*
- * Decide on the new content (mark) and mode (node_ctx.type).
+ * Find old content (old_data) and decide on the new mode.
*/
- if (node_ctx.action == NODEACT_CHANGE && !~*node_ctx.dst) {
+ if (node_ctx.action == NODEACT_CHANGE && !*node_ctx.dst.buf) {
if (type != REPO_MODE_DIR)
die("invalid dump: root of tree is not a regular file");
+ old_data = NULL;
} else if (node_ctx.action == NODEACT_CHANGE) {
uint32_t mode;
- if (!have_text)
- mark = repo_read_path(node_ctx.dst);
- mode = repo_read_mode(node_ctx.dst);
+ old_data = repo_read_path(node_ctx.dst.buf, &mode);
if (mode == REPO_MODE_DIR && type != REPO_MODE_DIR)
die("invalid dump: cannot modify a directory into a file");
if (mode != REPO_MODE_DIR && type == REPO_MODE_DIR)
die("invalid dump: cannot modify a file into a directory");
node_ctx.type = mode;
+ old_mode = mode;
} else if (node_ctx.action == NODEACT_ADD) {
- if (!have_text && type != REPO_MODE_DIR)
+ if (type == REPO_MODE_DIR)
+ old_data = NULL;
+ else if (have_text)
+ old_data = empty_blob;
+ else
die("invalid dump: adds node without text");
} else {
die("invalid dump: Node-path block lacks Node-action");
@@ -266,18 +280,39 @@ static void handle_node(void)
/*
* Save the result.
*/
- repo_add(node_ctx.dst, node_ctx.type, mark);
- if (have_text)
- fast_export_blob(node_ctx.type, mark,
- node_ctx.textLength, &input);
+ if (type == REPO_MODE_DIR) /* directories are not tracked. */
+ return;
+ assert(old_data);
+ if (old_data == empty_blob)
+ /* For the fast_export_* functions, NULL means empty. */
+ old_data = NULL;
+ if (!have_text) {
+ fast_export_modify(node_ctx.dst.buf, node_ctx.type, old_data);
+ return;
+ }
+ if (!node_ctx.text_delta) {
+ fast_export_modify(node_ctx.dst.buf, node_ctx.type, "inline");
+ fast_export_data(node_ctx.type, node_ctx.text_length, &input);
+ return;
+ }
+ fast_export_modify(node_ctx.dst.buf, node_ctx.type, "inline");
+ fast_export_blob_delta(node_ctx.type, old_mode, old_data,
+ node_ctx.text_length, &input);
}
-static void handle_revision(void)
+static void begin_revision(void)
+{
+ if (!rev_ctx.revision) /* revision 0 gets no git commit. */
+ return;
+ fast_export_begin_commit(rev_ctx.revision, rev_ctx.author.buf,
+ &rev_ctx.log, dump_ctx.uuid.buf, dump_ctx.url.buf,
+ rev_ctx.timestamp);
+}
+
+static void end_revision(void)
{
if (rev_ctx.revision)
- repo_commit(rev_ctx.revision, rev_ctx.author.buf,
- &rev_ctx.log, dump_ctx.uuid.buf, dump_ctx.url.buf,
- rev_ctx.timestamp);
+ fast_export_end_commit(rev_ctx.revision);
}
void svndump_read(const char *url)
@@ -318,8 +353,10 @@ void svndump_read(const char *url)
continue;
if (active_ctx == NODE_CTX)
handle_node();
+ if (active_ctx == REV_CTX)
+ begin_revision();
if (active_ctx != DUMP_CTX)
- handle_revision();
+ end_revision();
active_ctx = REV_CTX;
reset_rev_ctx(atoi(val));
break;
@@ -329,6 +366,8 @@ void svndump_read(const char *url)
if (!constcmp(t + strlen("Node-"), "path")) {
if (active_ctx == NODE_CTX)
handle_node();
+ if (active_ctx == REV_CTX)
+ begin_revision();
active_ctx = NODE_CTX;
reset_node_ctx(val);
break;
@@ -361,7 +400,8 @@ void svndump_read(const char *url)
case sizeof("Node-copyfrom-path"):
if (constcmp(t, "Node-copyfrom-path"))
continue;
- pool_tok_seq(REPO_MAX_PATH_DEPTH, node_ctx.src, "/", val);
+ strbuf_reset(&node_ctx.src);
+ strbuf_addstr(&node_ctx.src, val);
break;
case sizeof("Node-copyfrom-rev"):
if (constcmp(t, "Node-copyfrom-rev"))
@@ -370,7 +410,15 @@ void svndump_read(const char *url)
break;
case sizeof("Text-content-length"):
if (!constcmp(t, "Text-content-length")) {
- node_ctx.textLength = atoi(val);
+ char *end;
+ uintmax_t textlen;
+
+ textlen = strtoumax(val, &end, 10);
+ if (!isdigit(*val) || *end)
+ die("invalid dump: non-numeric length %s", val);
+ if (textlen > maximum_signed_value_of_type(off_t))
+ die("unrepresentable length in dump: %s", val);
+ node_ctx.text_length = (off_t) textlen;
break;
}
if (constcmp(t, "Prop-content-length"))
@@ -399,7 +447,7 @@ void svndump_read(const char *url)
read_props();
} else if (active_ctx == NODE_CTX) {
handle_node();
- active_ctx = REV_CTX;
+ active_ctx = INTERNODE_CTX;
} else {
fprintf(stderr, "Unexpected content length header: %"PRIu32"\n", len);
if (buffer_skip_bytes(&input, len) != len)
@@ -411,19 +459,23 @@ void svndump_read(const char *url)
die_short_read();
if (active_ctx == NODE_CTX)
handle_node();
+ if (active_ctx == REV_CTX)
+ begin_revision();
if (active_ctx != DUMP_CTX)
- handle_revision();
+ end_revision();
}
int svndump_init(const char *filename)
{
if (buffer_init(&input, filename))
return error("cannot open %s: %s", filename, strerror(errno));
- repo_init();
+ fast_export_init(REPORT_FILENO);
strbuf_init(&dump_ctx.uuid, 4096);
strbuf_init(&dump_ctx.url, 4096);
strbuf_init(&rev_ctx.log, 4096);
strbuf_init(&rev_ctx.author, 4096);
+ strbuf_init(&node_ctx.src, 4096);
+ strbuf_init(&node_ctx.dst, 4096);
reset_dump_ctx(NULL);
reset_rev_ctx(0);
reset_node_ctx(NULL);
@@ -432,11 +484,13 @@ int svndump_init(const char *filename)
void svndump_deinit(void)
{
- repo_reset();
+ fast_export_deinit();
reset_dump_ctx(NULL);
reset_rev_ctx(0);
reset_node_ctx(NULL);
strbuf_release(&rev_ctx.log);
+ strbuf_release(&node_ctx.src);
+ strbuf_release(&node_ctx.dst);
if (buffer_deinit(&input))
fprintf(stderr, "Input error\n");
if (ferror(stdout))
@@ -445,8 +499,8 @@ void svndump_deinit(void)
void svndump_reset(void)
{
+ fast_export_reset();
buffer_reset(&input);
- repo_reset();
strbuf_release(&dump_ctx.uuid);
strbuf_release(&dump_ctx.url);
strbuf_release(&rev_ctx.log);
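
Note: the Text-content-length hunk above replaces atoi() with strtoumax() plus explicit validation, so a malformed or oversized length dies instead of silently truncating. A standalone sketch of that validation pattern follows; parse_length() and the sample inputs are illustrative, not git API.

----
#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

/* Largest value representable in off_t, computed without overflowing. */
#define MAX_OFF_T ((uintmax_t)((((off_t)1 << (sizeof(off_t) * 8 - 2)) - 1) * 2 + 1))

static int parse_length(const char *val, off_t *out)
{
	char *end;
	uintmax_t n;

	errno = 0;
	n = strtoumax(val, &end, 10);
	if (!isdigit((unsigned char)*val) || *end || errno == ERANGE)
		return -1;		/* non-numeric length */
	if (n > MAX_OFF_T)
		return -1;		/* unrepresentable in off_t */
	*out = (off_t)n;
	return 0;
}

int main(void)
{
	const char *samples[] = { "1234", "12x4", "-5", "99999999999999999999" };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		off_t len;
		if (parse_length(samples[i], &len))
			printf("%s: invalid\n", samples[i]);
		else
			printf("%s: ok (%" PRIuMAX ")\n", samples[i], (uintmax_t)len);
	}
	return 0;
}
----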
diff --git a/vcs-svn/trp.h b/vcs-svn/trp.h
deleted file mode 100644
index c32b9184e9..0000000000
--- a/vcs-svn/trp.h
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * C macro implementation of treaps.
- *
- * Usage:
- * #include <stdint.h>
- * #include "trp.h"
- * trp_gen(...)
- *
- * Licensed under a two-clause BSD-style license.
- * See LICENSE for details.
- */
-
-#ifndef TRP_H_
-#define TRP_H_
-
-#define MAYBE_UNUSED __attribute__((__unused__))
-
-/* Node structure. */
-struct trp_node {
- uint32_t trpn_left;
- uint32_t trpn_right;
-};
-
-/* Root structure. */
-struct trp_root {
- uint32_t trp_root;
-};
-
-/* Pointer/Offset conversion. */
-#define trpn_pointer(a_base, a_offset) (a_base##_pointer(a_offset))
-#define trpn_offset(a_base, a_pointer) (a_base##_offset(a_pointer))
-#define trpn_modify(a_base, a_offset) \
- do { \
- if ((a_offset) < a_base##_pool.committed) { \
- uint32_t old_offset = (a_offset);\
- (a_offset) = a_base##_alloc(1); \
- *trpn_pointer(a_base, a_offset) = \
- *trpn_pointer(a_base, old_offset); \
- } \
- } while (0)
-
-/* Left accessors. */
-#define trp_left_get(a_base, a_field, a_node) \
- (trpn_pointer(a_base, a_node)->a_field.trpn_left)
-#define trp_left_set(a_base, a_field, a_node, a_left) \
- do { \
- trpn_modify(a_base, a_node); \
- trp_left_get(a_base, a_field, a_node) = (a_left); \
- } while (0)
-
-/* Right accessors. */
-#define trp_right_get(a_base, a_field, a_node) \
- (trpn_pointer(a_base, a_node)->a_field.trpn_right)
-#define trp_right_set(a_base, a_field, a_node, a_right) \
- do { \
- trpn_modify(a_base, a_node); \
- trp_right_get(a_base, a_field, a_node) = (a_right); \
- } while (0)
-
-/*
- * Fibonacci hash function.
- * The multiplier is the nearest prime to (2^32 times (√5 - 1)/2).
- * See Knuth §6.4: volume 3, 3rd ed, p518.
- */
-#define trpn_hash(a_node) (uint32_t) (2654435761u * (a_node))
-
-/* Priority accessors. */
-#define trp_prio_get(a_node) trpn_hash(a_node)
-
-/* Node initializer. */
-#define trp_node_new(a_base, a_field, a_node) \
- do { \
- trp_left_set(a_base, a_field, (a_node), ~0); \
- trp_right_set(a_base, a_field, (a_node), ~0); \
- } while (0)
-
-/* Internal utility macros. */
-#define trpn_first(a_base, a_field, a_root, r_node) \
- do { \
- (r_node) = (a_root); \
- if ((r_node) == ~0) \
- return NULL; \
- while (~trp_left_get(a_base, a_field, (r_node))) \
- (r_node) = trp_left_get(a_base, a_field, (r_node)); \
- } while (0)
-
-#define trpn_rotate_left(a_base, a_field, a_node, r_node) \
- do { \
- (r_node) = trp_right_get(a_base, a_field, (a_node)); \
- trp_right_set(a_base, a_field, (a_node), \
- trp_left_get(a_base, a_field, (r_node))); \
- trp_left_set(a_base, a_field, (r_node), (a_node)); \
- } while (0)
-
-#define trpn_rotate_right(a_base, a_field, a_node, r_node) \
- do { \
- (r_node) = trp_left_get(a_base, a_field, (a_node)); \
- trp_left_set(a_base, a_field, (a_node), \
- trp_right_get(a_base, a_field, (r_node))); \
- trp_right_set(a_base, a_field, (r_node), (a_node)); \
- } while (0)
-
-#define trp_gen(a_attr, a_pre, a_type, a_field, a_base, a_cmp) \
-a_attr a_type MAYBE_UNUSED *a_pre##first(struct trp_root *treap) \
-{ \
- uint32_t ret; \
- trpn_first(a_base, a_field, treap->trp_root, ret); \
- return trpn_pointer(a_base, ret); \
-} \
-a_attr a_type MAYBE_UNUSED *a_pre##next(struct trp_root *treap, a_type *node) \
-{ \
- uint32_t ret; \
- uint32_t offset = trpn_offset(a_base, node); \
- if (~trp_right_get(a_base, a_field, offset)) { \
- trpn_first(a_base, a_field, \
- trp_right_get(a_base, a_field, offset), ret); \
- } else { \
- uint32_t tnode = treap->trp_root; \
- ret = ~0; \
- while (1) { \
- int cmp = (a_cmp)(trpn_pointer(a_base, offset), \
- trpn_pointer(a_base, tnode)); \
- if (cmp < 0) { \
- ret = tnode; \
- tnode = trp_left_get(a_base, a_field, tnode); \
- } else if (cmp > 0) { \
- tnode = trp_right_get(a_base, a_field, tnode); \
- } else { \
- break; \
- } \
- } \
- } \
- return trpn_pointer(a_base, ret); \
-} \
-a_attr a_type MAYBE_UNUSED *a_pre##search(struct trp_root *treap, a_type *key) \
-{ \
- int cmp; \
- uint32_t ret = treap->trp_root; \
- while (~ret && (cmp = (a_cmp)(key, trpn_pointer(a_base, ret)))) { \
- if (cmp < 0) { \
- ret = trp_left_get(a_base, a_field, ret); \
- } else { \
- ret = trp_right_get(a_base, a_field, ret); \
- } \
- } \
- return trpn_pointer(a_base, ret); \
-} \
-a_attr a_type MAYBE_UNUSED *a_pre##nsearch(struct trp_root *treap, a_type *key) \
-{ \
- int cmp; \
- uint32_t ret = treap->trp_root; \
- while (~ret && (cmp = (a_cmp)(key, trpn_pointer(a_base, ret)))) { \
- if (cmp < 0) { \
- if (!~trp_left_get(a_base, a_field, ret)) \
- break; \
- ret = trp_left_get(a_base, a_field, ret); \
- } else { \
- ret = trp_right_get(a_base, a_field, ret); \
- } \
- } \
- return trpn_pointer(a_base, ret); \
-} \
-a_attr uint32_t MAYBE_UNUSED a_pre##insert_recurse(uint32_t cur_node, uint32_t ins_node) \
-{ \
- if (cur_node == ~0) { \
- return ins_node; \
- } else { \
- uint32_t ret; \
- int cmp = (a_cmp)(trpn_pointer(a_base, ins_node), \
- trpn_pointer(a_base, cur_node)); \
- if (cmp < 0) { \
- uint32_t left = a_pre##insert_recurse( \
- trp_left_get(a_base, a_field, cur_node), ins_node); \
- trp_left_set(a_base, a_field, cur_node, left); \
- if (trp_prio_get(left) < trp_prio_get(cur_node)) \
- trpn_rotate_right(a_base, a_field, cur_node, ret); \
- else \
- ret = cur_node; \
- } else { \
- uint32_t right = a_pre##insert_recurse( \
- trp_right_get(a_base, a_field, cur_node), ins_node); \
- trp_right_set(a_base, a_field, cur_node, right); \
- if (trp_prio_get(right) < trp_prio_get(cur_node)) \
- trpn_rotate_left(a_base, a_field, cur_node, ret); \
- else \
- ret = cur_node; \
- } \
- return ret; \
- } \
-} \
-a_attr a_type *MAYBE_UNUSED a_pre##insert(struct trp_root *treap, a_type *node) \
-{ \
- uint32_t offset = trpn_offset(a_base, node); \
- trp_node_new(a_base, a_field, offset); \
- treap->trp_root = a_pre##insert_recurse(treap->trp_root, offset); \
- return trpn_pointer(a_base, offset); \
-} \
-a_attr uint32_t MAYBE_UNUSED a_pre##remove_recurse(uint32_t cur_node, uint32_t rem_node) \
-{ \
- int cmp = a_cmp(trpn_pointer(a_base, rem_node), \
- trpn_pointer(a_base, cur_node)); \
- if (cmp == 0) { \
- uint32_t ret; \
- uint32_t left = trp_left_get(a_base, a_field, cur_node); \
- uint32_t right = trp_right_get(a_base, a_field, cur_node); \
- if (left == ~0) { \
- if (right == ~0) \
- return ~0; \
- } else if (right == ~0 || trp_prio_get(left) < trp_prio_get(right)) { \
- trpn_rotate_right(a_base, a_field, cur_node, ret); \
- right = a_pre##remove_recurse(cur_node, rem_node); \
- trp_right_set(a_base, a_field, ret, right); \
- return ret; \
- } \
- trpn_rotate_left(a_base, a_field, cur_node, ret); \
- left = a_pre##remove_recurse(cur_node, rem_node); \
- trp_left_set(a_base, a_field, ret, left); \
- return ret; \
- } else if (cmp < 0) { \
- uint32_t left = a_pre##remove_recurse( \
- trp_left_get(a_base, a_field, cur_node), rem_node); \
- trp_left_set(a_base, a_field, cur_node, left); \
- return cur_node; \
- } else { \
- uint32_t right = a_pre##remove_recurse( \
- trp_right_get(a_base, a_field, cur_node), rem_node); \
- trp_right_set(a_base, a_field, cur_node, right); \
- return cur_node; \
- } \
-} \
-a_attr void MAYBE_UNUSED a_pre##remove(struct trp_root *treap, a_type *node) \
-{ \
- treap->trp_root = a_pre##remove_recurse(treap->trp_root, \
- trpn_offset(a_base, node)); \
-} \
-
-#endif
diff --git a/vcs-svn/trp.txt b/vcs-svn/trp.txt
deleted file mode 100644
index 177ebca335..0000000000
--- a/vcs-svn/trp.txt
+++ /dev/null
@@ -1,109 +0,0 @@
-Motivation
-==========
-
-Treaps provide a memory-efficient binary search tree structure.
-Insertion/deletion/search are about as fast in the average
-case as red-black trees and the chances of worst-case behavior are
-vanishingly small, thanks to (pseudo-)randomness. The bad worst-case
-behavior is a small price to pay, given that treaps are much simpler
-to implement.
-
-API
-===
-
-The trp API generates a data structure and functions to handle a
-large growing set of objects stored in a pool.
-
-The caller:
-
-. Specifies parameters for the generated functions with the
- trp_gen(static, foo_, ...) macro.
-
-. Allocates a `struct trp_root` variable and sets it to {~0}.
-
-. Adds new nodes to the set using `foo_insert`. Any pointers
- to existing nodes cannot be relied upon any more, so the caller
- might retrieve them anew with `foo_pointer`.
-
-. Can find a specific item in the set using `foo_search`.
-
-. Can iterate over items in the set using `foo_first` and `foo_next`.
-
-. Can remove an item from the set using `foo_remove`.
-
-Example:
-
-----
-struct ex_node {
- const char *s;
- struct trp_node ex_link;
-};
-static struct trp_root ex_base = {~0};
-obj_pool_gen(ex, struct ex_node, 4096);
-trp_gen(static, ex_, struct ex_node, ex_link, ex, strcmp)
-struct ex_node *item;
-
-item = ex_pointer(ex_alloc(1));
-item->s = "hello";
-ex_insert(&ex_base, item);
-item = ex_pointer(ex_alloc(1));
-item->s = "goodbye";
-ex_insert(&ex_base, item);
-for (item = ex_first(&ex_base); item; item = ex_next(&ex_base, item))
- printf("%s\n", item->s);
-----
-
-Functions
----------
-
-trp_gen(attr, foo_, node_type, link_field, pool, cmp)::
-
- Generate a type-specific treap implementation.
-+
-. The storage class for generated functions will be 'attr' (e.g., `static`).
-. Generated function names are prefixed with 'foo_' (e.g., `treap_`).
-. Treap nodes will be of type 'node_type' (e.g., `struct treap_node`).
- This type must be a struct with at least one `struct trp_node` field
- to point to its children.
-. The field used to access child nodes will be 'link_field'.
-. All treap nodes must lie in the 'pool' object pool.
-. Treap nodes must be totally ordered by the 'cmp' relation, with the
- following prototype:
-+
-int (*cmp)(node_type \*a, node_type \*b)
-+
-and returning a value less than, equal to, or greater than zero
-according to the result of comparison.
-
-node_type {asterisk}foo_insert(struct trp_root *treap, node_type \*node)::
-
- Insert node into treap. If inserted multiple times,
- a node will appear in the treap multiple times.
-+
-The return value is the address of the node within the treap,
-which might differ from `node` if `pool_alloc` had to call
-`realloc` to expand the pool.
-
-void foo_remove(struct trp_root *treap, node_type \*node)::
-
- Remove node from treap. Caller must ensure node is
- present in treap before using this function.
-
-node_type *foo_search(struct trp_root \*treap, node_type \*key)::
-
- Search for a node that matches key. If no match is found,
- result is NULL.
-
-node_type *foo_nsearch(struct trp_root \*treap, node_type \*key)::
-
- Like `foo_search`, but if the key is missing return what
- would be key's successor, were key in treap (NULL if no
- successor).
-
-node_type *foo_first(struct trp_root \*treap)::
-
- Find the first item from the treap, in sorted order.
-
-node_type *foo_next(struct trp_root \*treap, node_type \*node)::
-
- Find the next item.
diff --git a/xdiff/xemit.c b/xdiff/xemit.c
index 2e669c3e25..d11dbf9f13 100644
--- a/xdiff/xemit.c
+++ b/xdiff/xemit.c
@@ -87,7 +87,7 @@ static long def_ff(const char *rec, long len, char *buf, long sz, void *priv)
static int xdl_emit_common(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
xdemitconf_t const *xecfg) {
- xdfile_t *xdf = &xe->xdf1;
+ xdfile_t *xdf = &xe->xdf2;
const char *rchg = xdf->rchg;
long ix;
@@ -204,8 +204,8 @@ int xdl_emit_diff(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
/*
* Emit pre-context.
*/
- for (; s1 < xch->i1; s1++)
- if (xdl_emit_record(&xe->xdf1, s1, " ", ecb) < 0)
+ for (; s2 < xch->i2; s2++)
+ if (xdl_emit_record(&xe->xdf2, s2, " ", ecb) < 0)
return -1;
for (s1 = xch->i1, s2 = xch->i2;; xch = xch->next) {
@@ -213,7 +213,7 @@ int xdl_emit_diff(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
* Merge previous with current change atom.
*/
for (; s1 < xch->i1 && s2 < xch->i2; s1++, s2++)
- if (xdl_emit_record(&xe->xdf1, s1, " ", ecb) < 0)
+ if (xdl_emit_record(&xe->xdf2, s2, " ", ecb) < 0)
return -1;
/*
@@ -239,8 +239,8 @@ int xdl_emit_diff(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
/*
* Emit post-context.
*/
- for (s1 = xche->i1 + xche->chg1; s1 < e1; s1++)
- if (xdl_emit_record(&xe->xdf1, s1, " ", ecb) < 0)
+ for (s2 = xche->i2 + xche->chg2; s2 < e2; s2++)
+ if (xdl_emit_record(&xe->xdf2, s2, " ", ecb) < 0)
return -1;
}