-rw-r--r--  .gitattributes | 4
-rw-r--r--  .gitignore | 3
-rw-r--r--  Documentation/Makefile | 2
-rw-r--r--  Documentation/SubmittingPatches | 4
-rw-r--r--  Documentation/config.txt | 34
-rwxr-xr-x  Documentation/doc-diff | 39
-rw-r--r--  Documentation/fetch-options.txt | 15
-rw-r--r--  Documentation/git-branch.txt | 9
-rw-r--r--  Documentation/git-format-patch.txt | 29
-rw-r--r--  Documentation/git-interpret-trailers.txt | 10
-rw-r--r--  Documentation/git-multi-pack-index.txt | 56
-rw-r--r--  Documentation/git-pack-objects.txt | 97
-rw-r--r--  Documentation/git-push.txt | 57
-rw-r--r--  Documentation/git-repack.txt | 5
-rw-r--r--  Documentation/git-rerere.txt | 6
-rw-r--r--  Documentation/git-worktree.txt | 12
-rw-r--r--  Documentation/git.txt | 4
-rw-r--r--  Documentation/gitrevisions.txt | 7
-rw-r--r--  Documentation/pull-fetch-param.txt | 39
-rw-r--r--  Documentation/technical/multi-pack-index.txt | 109
-rw-r--r--  Documentation/technical/pack-format.txt | 77
-rw-r--r--  Documentation/technical/rerere.txt | 186
-rwxr-xr-x  GIT-VERSION-GEN | 2
-rw-r--r--  Makefile | 8
-rw-r--r-- [l---------]  RelNotes | 113
-rw-r--r--  archive.c | 3
-rw-r--r--  attr.c | 8
-rw-r--r--  attr.h | 4
-rw-r--r--  bisect.c | 17
-rw-r--r--  blame.c | 8
-rw-r--r--  builtin.h | 1
-rw-r--r--  builtin/add.c | 2
-rw-r--r--  builtin/am.c | 6
-rw-r--r--  builtin/branch.c | 23
-rw-r--r--  builtin/check-attr.c | 3
-rw-r--r--  builtin/checkout.c | 122
-rw-r--r--  builtin/clone.c | 1
-rw-r--r--  builtin/commit.c | 5
-rw-r--r--  builtin/count-objects.c | 2
-rw-r--r--  builtin/describe.c | 10
-rw-r--r--  builtin/diff.c | 2
-rw-r--r--  builtin/difftool.c | 4
-rw-r--r--  builtin/fast-export.c | 2
-rw-r--r--  builtin/fetch.c | 27
-rw-r--r--  builtin/fmt-merge-msg.c | 7
-rw-r--r--  builtin/fsck.c | 4
-rw-r--r--  builtin/gc.c | 4
-rw-r--r--  builtin/index-pack.c | 8
-rw-r--r--  builtin/interpret-trailers.c | 1
-rw-r--r--  builtin/log.c | 146
-rw-r--r--  builtin/merge-base.c | 1
-rw-r--r--  builtin/merge-tree.c | 2
-rw-r--r--  builtin/merge.c | 7
-rw-r--r--  builtin/multi-pack-index.c | 47
-rw-r--r--  builtin/pack-objects.c | 212
-rw-r--r--  builtin/pack-redundant.c | 4
-rw-r--r--  builtin/pull.c | 5
-rw-r--r--  builtin/range-diff.c | 25
-rw-r--r--  builtin/receive-pack.c | 7
-rw-r--r--  builtin/remote.c | 3
-rw-r--r--  builtin/repack.c | 18
-rw-r--r--  builtin/replace.c | 6
-rw-r--r--  builtin/rerere.c | 4
-rw-r--r--  builtin/rev-list.c | 2
-rw-r--r--  builtin/rev-parse.c | 1
-rw-r--r--  builtin/rm.c | 2
-rw-r--r--  builtin/show-branch.c | 6
-rw-r--r--  builtin/submodule--helper.c | 192
-rw-r--r--  builtin/tag.c | 2
-rw-r--r--  builtin/unpack-objects.c | 4
-rw-r--r--  builtin/update-index.c | 4
-rw-r--r--  builtin/worktree.c | 113
-rw-r--r--  bulk-checkin.c | 2
-rw-r--r--  bundle.c | 2
-rw-r--r--  cache-tree.c | 82
-rw-r--r--  cache-tree.h | 1
-rw-r--r--  cache.h | 23
-rw-r--r--  combine-diff.c | 4
-rw-r--r--  command-list.txt | 1
-rw-r--r--  commit-graph.c | 33
-rw-r--r--  commit-graph.h | 8
-rw-r--r--  commit-reach.c | 692
-rw-r--r--  commit-reach.h | 77
-rw-r--r--  commit.c | 369
-rw-r--r--  commit.h | 31
-rw-r--r--  compat/mingw.c | 36
-rw-r--r--  config.mak.dev | 1
-rw-r--r--  connect.c | 2
-rw-r--r--  contrib/coccinelle/commit.cocci | 4
-rw-r--r--  contrib/coccinelle/object_id.cocci | 50
-rw-r--r--  convert.c | 42
-rw-r--r--  delta-islands.c | 502
-rw-r--r--  delta-islands.h | 11
-rw-r--r--  diff-lib.c | 8
-rw-r--r--  diff.c | 140
-rw-r--r--  diff.h | 5
-rw-r--r--  diffcore-break.c | 2
-rw-r--r--  diffcore-rename.c | 2
-rw-r--r--  dir.c | 15
-rw-r--r--  entry.c | 31
-rw-r--r--  fast-import.c | 15
-rw-r--r--  fetch-pack.c | 2
-rwxr-xr-x  git-submodule.sh | 24
-rw-r--r--  git.c | 1
-rw-r--r--  http-backend.c | 4
-rw-r--r--  http-push.c | 4
-rw-r--r--  http-walker.c | 4
-rw-r--r--  http.c | 2
-rw-r--r--  interdiff.c | 28
-rw-r--r--  interdiff.h | 8
-rw-r--r--  ll-merge.c | 16
-rw-r--r--  lockfile.h | 4
-rw-r--r--  log-tree.c | 58
-rw-r--r--  mailinfo.c | 64
-rw-r--r--  mailinfo.h | 2
-rw-r--r--  match-trees.c | 2
-rw-r--r--  merge-recursive.c | 7
-rw-r--r--  midx.c | 930
-rw-r--r--  midx.h | 47
-rw-r--r--  name-hash.c | 4
-rw-r--r--  notes-merge.c | 25
-rw-r--r--  notes.c | 8
-rw-r--r--  object-store.h | 15
-rw-r--r--  object.c | 2
-rw-r--r--  object.h | 4
-rw-r--r--  oidmap.c | 12
-rw-r--r--  pack-bitmap-write.c | 1
-rw-r--r--  pack-bitmap.c | 37
-rw-r--r--  pack-bitmap.h | 7
-rw-r--r--  pack-check.c | 6
-rw-r--r--  pack-objects.c | 35
-rw-r--r--  pack-objects.h | 59
-rw-r--r--  pack-write.c | 4
-rw-r--r--  packfile.c | 211
-rw-r--r--  packfile.h | 10
-rw-r--r--  patch-delta.c | 28
-rw-r--r--  patch-ids.c | 8
-rw-r--r--  preload-index.c | 4
-rw-r--r--  pretty.c | 3
-rw-r--r--  range-diff.c | 48
-rw-r--r--  range-diff.h | 5
-rw-r--r--  read-cache.c | 28
-rw-r--r--  ref-filter.c | 148
-rw-r--r--  refs.c | 8
-rw-r--r--  refs/files-backend.c | 6
-rw-r--r--  refs/packed-backend.c | 2
-rw-r--r--  refs/ref-cache.c | 2
-rw-r--r--  remote-curl.c | 2
-rw-r--r--  remote.c | 58
-rw-r--r--  remote.h | 1
-rw-r--r--  rerere.c | 245
-rw-r--r--  revision.c | 8
-rw-r--r--  revision.h | 16
-rw-r--r--  sequencer.c | 71
-rw-r--r--  sequencer.h | 9
-rw-r--r--  server-info.c | 4
-rw-r--r--  sha1-array.c | 2
-rw-r--r--  sha1-file.c | 12
-rw-r--r--  sha1-name.c | 73
-rw-r--r--  shallow.c | 1
-rw-r--r--  string-list.c | 10
-rw-r--r--  string-list.h | 8
-rw-r--r--  submodule-config.c | 4
-rw-r--r--  submodule.c | 6
-rw-r--r--  t/README | 8
-rw-r--r--  t/helper/test-delta.c | 8
-rw-r--r--  t/helper/test-dump-cache-tree.c | 2
-rw-r--r--  t/helper/test-reach.c | 142
-rw-r--r--  t/helper/test-read-midx.c | 51
-rw-r--r--  t/helper/test-tool.c | 5
-rw-r--r--  t/helper/test-tool.h | 5
-rw-r--r--  t/helper/test-windows-named-pipe.c | 72
-rw-r--r--  t/perf/README | 25
-rwxr-xr-x  t/perf/aggregate.perl | 69
-rwxr-xr-x  t/perf/p5311-pack-bitmaps-fetch.sh | 45
-rw-r--r--  t/perf/perf-lib.sh | 74
-rwxr-xr-x  t/t0051-windows-named-pipe.sh | 17
-rwxr-xr-x  t/t0090-cache-tree.sh | 18
-rwxr-xr-x  t/t0410-partial-clone.sh | 2
-rwxr-xr-x  t/t1090-sparse-checkout-scope.sh | 14
-rwxr-xr-x  t/t1404-update-ref-errors.sh | 6
-rwxr-xr-x  t/t2025-worktree-add.sh | 18
-rwxr-xr-x  t/t2028-worktree-move.sh | 44
-rwxr-xr-x  t/t3206-range-diff.sh | 62
-rwxr-xr-x  t/t3404-rebase-interactive.sh | 7
-rwxr-xr-x  t/t3405-rebase-malformed.sh | 2
-rwxr-xr-x  t/t3415-rebase-autosquash.sh | 19
-rwxr-xr-x  t/t3505-cherry-pick-empty.sh | 18
-rwxr-xr-x  t/t3701-add-interactive.sh | 2
-rwxr-xr-x  t/t4014-format-patch.sh | 34
-rwxr-xr-x  t/t4200-rerere.sh | 94
-rwxr-xr-x  t/t4205-log-pretty-formats.sh | 23
-rwxr-xr-x  t/t4256-am-format-flowed.sh | 19
-rw-r--r--  t/t4256/1/mailinfo.c | 1245
-rw-r--r--  t/t4256/1/mailinfo.c.orig | 1185
-rw-r--r--  t/t4256/1/patch | 129
-rwxr-xr-x  t/t5303-pack-corruption-resilience.sh | 89
-rwxr-xr-x  t/t5307-pack-missing-commit.sh | 4
-rwxr-xr-x  t/t5310-pack-bitmaps.sh | 93
-rwxr-xr-x  t/t5319-multi-pack-index.sh | 217
-rwxr-xr-x  t/t5320-delta-islands.sh | 143
-rwxr-xr-x  t/t5516-fetch-push.sh | 29
-rwxr-xr-x  t/t5562-http-backend-content-length.sh | 4
-rwxr-xr-x  t/t5601-clone.sh | 8
-rwxr-xr-x  t/t5612-clone-refspec.sh | 4
-rwxr-xr-x  t/t6011-rev-list-with-bad-commit.sh | 7
-rwxr-xr-x  t/t6018-rev-list-glob.sh | 2
-rwxr-xr-x  t/t6024-recursive-merge.sh | 6
-rwxr-xr-x  t/t6135-pathspec-with-attrs.sh | 2
-rwxr-xr-x  t/t6300-for-each-ref.sh | 23
-rwxr-xr-x  t/t6600-test-reach.sh | 268
-rwxr-xr-x  t/t7501-commit.sh | 16
-rwxr-xr-x  t/t7513-interpret-trailers.sh | 42
-rw-r--r--  t/test-lib-functions.sh | 2
-rw-r--r--  t/test-lib.sh | 6
-rw-r--r--  tempfile.c | 2
-rw-r--r--  tempfile.h | 4
-rw-r--r--  trace.c | 69
-rw-r--r--  trace.h | 15
-rw-r--r--  trailer.c | 62
-rw-r--r--  trailer.h | 4
-rw-r--r--  transport.c | 2
-rw-r--r--  tree-diff.c | 2
-rw-r--r--  unpack-trees.c | 207
-rw-r--r--  unpack-trees.h | 1
-rw-r--r--  upload-pack.c | 58
-rw-r--r--  userdiff.c | 3
-rw-r--r--  worktree.c | 6
-rw-r--r--  ws.c | 44
-rw-r--r--  wt-status.c | 10
-rw-r--r--  xdiff-interface.c | 2
231 files changed, 9935 insertions, 1657 deletions
diff --git a/.gitattributes b/.gitattributes
index 1bdc91e282..49b3051641 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -9,3 +9,7 @@
/command-list.txt eol=lf
/GIT-VERSION-GEN eol=lf
/mergetools/* eol=lf
+/Documentation/git-merge.txt conflict-marker-size=32
+/Documentation/gitk.txt conflict-marker-size=32
+/Documentation/user-manual.txt conflict-marker-size=32
+/t/t????-*.sh conflict-marker-size=32
diff --git a/.gitignore b/.gitignore
index ffceea7d59..9d1363a1eb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -99,8 +99,9 @@
/git-mergetool--lib
/git-mktag
/git-mktree
-/git-name-rev
+/git-multi-pack-index
/git-mv
+/git-name-rev
/git-notes
/git-p4
/git-pack-redundant
diff --git a/Documentation/Makefile b/Documentation/Makefile
index a42dcfc745..95f6a321f2 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -344,7 +344,7 @@ $(OBSOLETE_HTML): %.html : %.txto asciidoc.conf
mv $@+ $@
manpage-base-url.xsl: manpage-base-url.xsl.in
- sed "s|@@MAN_BASE_URL@@|$(MAN_BASE_URL)|" $< > $@
+ $(QUIET_GEN)sed "s|@@MAN_BASE_URL@@|$(MAN_BASE_URL)|" $< > $@
%.1 %.5 %.7 : %.xml manpage-base-url.xsl
$(QUIET_XMLTO)$(RM) $@ && \
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index b44fd51f27..ec8b205145 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -80,7 +80,9 @@ GitHub-Travis CI hints section for details.
Do not forget to update the documentation to describe the updated
behavior and make sure that the resulting documentation set formats
-well. It is currently a liberal mixture of US and UK English norms for
+well (try the Documentation/doc-diff script).
+
+We currently have a liberal mixture of US and UK English norms for
spelling and grammar, which is somewhat unfortunate. A huge patch that
touches the files all over the place only to correct the inconsistency
is not welcome, though. Potential clashes with other changes that can
diff --git a/Documentation/config.txt b/Documentation/config.txt
index eb66a11975..ad0f4510c3 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -937,6 +937,11 @@ core.useReplaceRefs::
option was given on the command line. See linkgit:git[1] and
linkgit:git-replace[1] for more information.
+core.multiPackIndex::
+ Use the multi-pack-index file to track multiple packfiles using a
+ single index. See link:technical/multi-pack-index.html[the
+ multi-pack-index design document].
+
core.sparseCheckout::
Enable "sparse checkout" feature. See section "Sparse checkout" in
linkgit:git-read-tree[1] for more information.
@@ -1154,6 +1159,14 @@ and by linkgit:git-worktree[1] when 'git worktree add' refers to a
remote branch. This setting might be used for other checkout-like
commands or functionality in the future.
+checkout.optimizeNewBranch::
+ Optimizes the performance of "git checkout -b <new_branch>" when
+ using sparse-checkout. When set to true, git will not update the
+ repo based on the current sparse-checkout settings. This means it
+ will not update the skip-worktree bit in the index nor add/remove
+ files in the working directory to reflect the current sparse checkout
+ settings nor will it show the local changes.
+
clean.requireForce::
A boolean to make git-clean do nothing unless given -f,
-i or -n. Defaults to true.
@@ -2671,6 +2684,21 @@ Note that changing the compression level will not automatically recompress
all existing objects. You can force recompression by passing the -F option
to linkgit:git-repack[1].
+pack.island::
+ An extended regular expression configuring a set of delta
+ islands. See "DELTA ISLANDS" in linkgit:git-pack-objects[1]
+ for details.
+
+pack.islandCore::
+ Specify an island name which gets to have its objects be
+ packed first. This creates a kind of pseudo-pack at the front
+ of one pack, so that the objects from the specified island are
+ hopefully faster to copy into any pack that should be served
+ to a user requesting these objects. In practice this means
+ that the island specified should likely correspond to what is
+ the most commonly cloned in the repo. See also "DELTA ISLANDS"
+ in linkgit:git-pack-objects[1].
+
pack.deltaCacheSize::
The maximum memory in bytes used for caching deltas in
linkgit:git-pack-objects[1] before writing them out to a pack.
@@ -2828,6 +2856,8 @@ protocol.version::
* `1` - the original wire protocol with the addition of a version string
in the initial response from the server.
+* `2` - link:technical/protocol-v2.html[wire protocol version 2].
+
--
pull.ff::
@@ -3205,6 +3235,10 @@ repack.packKeptObjects::
index is being written (either via `--write-bitmap-index` or
`repack.writeBitmaps`).
+repack.useDeltaIslands::
+ If set to true, makes `git repack` act as if `--delta-islands`
+ was passed. Defaults to `false`.
+
repack.writeBitmaps::
When true, git will write a bitmap index when packing all
objects to disk (e.g., when `git repack -a` is run). This
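As a rough sketch of how the new configuration added above could be enabled in a repository (purely illustrative values):
-------------------------------------------
# opt in to reading the multi-pack-index, if one has been written
git config core.multiPackIndex true

# make "git repack" behave as if --delta-islands were passed
git config repack.useDeltaIslands true
-------------------------------------------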
diff --git a/Documentation/doc-diff b/Documentation/doc-diff
index f483fe427c..dfd9418778 100755
--- a/Documentation/doc-diff
+++ b/Documentation/doc-diff
@@ -1,21 +1,34 @@
#!/bin/sh
+#
+# Build two documentation trees and diff the resulting formatted output.
+# Compared to a source diff, this can reveal mistakes in the formatting.
+# For example:
+#
+# ./doc-diff origin/master HEAD
+#
+# would show the differences introduced by a branch based on master.
OPTIONS_SPEC="\
doc-diff [options] <from> <to> [-- <diff-options>]
+doc-diff (-c|--clean)
--
j=n parallel argument to pass to make
f force rebuild; do not rely on cached results
+c,clean cleanup temporary working files
"
SUBDIRECTORY_OK=1
. "$(git --exec-path)/git-sh-setup"
parallel=
force=
+clean=
while test $# -gt 0
do
case "$1" in
-j)
parallel=$2; shift ;;
+ -c|--clean)
+ clean=t ;;
-f)
force=t ;;
--)
@@ -26,6 +39,17 @@ do
shift
done
+cd_to_toplevel
+tmp=Documentation/tmp-doc-diff
+
+if test -n "$clean"
+then
+ test $# -eq 0 || usage
+ git worktree remove --force "$tmp/worktree" 2>/dev/null
+ rm -rf "$tmp"
+ exit 0
+fi
+
if test -z "$parallel"
then
parallel=$(getconf _NPROCESSORS_ONLN 2>/dev/null)
@@ -42,9 +66,6 @@ to=$1; shift
from_oid=$(git rev-parse --verify "$from") || exit 1
to_oid=$(git rev-parse --verify "$to") || exit 1
-cd_to_toplevel
-tmp=Documentation/tmp-doc-diff
-
if test -n "$force"
then
rm -rf "$tmp"
@@ -54,7 +75,7 @@ fi
# results that don't differ between the two trees.
if ! test -d "$tmp/worktree"
then
- git worktree add --detach "$tmp/worktree" "$from" &&
+ git worktree add -f --detach "$tmp/worktree" "$from" &&
dots=$(echo "$tmp/worktree" | sed 's#[^/]*#..#g') &&
ln -s "$dots/config.mak" "$tmp/worktree/config.mak"
fi
@@ -69,12 +90,12 @@ generate_render_makefile () {
printf '%s: %s\n' "$dst" "$src"
printf '\t@echo >&2 " RENDER $(notdir $@)" && \\\n'
printf '\tmkdir -p $(dir $@) && \\\n'
- printf '\tMANWIDTH=80 man -l $< >$@+ && \\\n'
+ printf '\tMANWIDTH=80 man $< >$@+ && \\\n'
printf '\tmv $@+ $@\n'
done
}
-# render_tree <dirname> <committish>
+# render_tree <committish_oid>
render_tree () {
# Skip install-man entirely if we already have an installed directory.
# We can't rely on make here, since "install-man" unconditionally
@@ -84,7 +105,7 @@ render_tree () {
# through.
if ! test -d "$tmp/installed/$1"
then
- git -C "$tmp/worktree" checkout "$2" &&
+ git -C "$tmp/worktree" checkout --detach "$1" &&
make -j$parallel -C "$tmp/worktree" \
GIT_VERSION=omitted \
SOURCE_DATE_EPOCH=0 \
@@ -104,6 +125,6 @@ render_tree () {
fi
}
-render_tree $from_oid "$from" &&
-render_tree $to_oid "$to" &&
+render_tree $from_oid &&
+render_tree $to_oid &&
git -C $tmp/rendered diff --no-index "$@" $from_oid $to_oid
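For example, with the new --clean mode the script could be used like this (a sketch based on the options above, run from the Documentation directory):
-------------------------------------------
# compare the rendered manpages of two commits...
./doc-diff origin/master HEAD

# ...then drop the temporary worktree and cached renders
./doc-diff --clean
-------------------------------------------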
diff --git a/Documentation/fetch-options.txt b/Documentation/fetch-options.txt
index 8bc36af4b1..fa0a3151b3 100644
--- a/Documentation/fetch-options.txt
+++ b/Documentation/fetch-options.txt
@@ -68,11 +68,16 @@ endif::git-pull[]
-f::
--force::
- When 'git fetch' is used with `<rbranch>:<lbranch>`
- refspec, it refuses to update the local branch
- `<lbranch>` unless the remote branch `<rbranch>` it
- fetches is a descendant of `<lbranch>`. This option
- overrides that check.
+ When 'git fetch' is used with `<src>:<dst>` refspec it may
+ refuse to update the local branch as discussed
+ifdef::git-pull[]
+ in the `<refspec>` part of the linkgit:git-fetch[1]
+ documentation.
+endif::git-pull[]
+ifndef::git-pull[]
+ in the `<refspec>` part below.
+endif::git-pull[]
+ This option overrides that check.
-k::
--keep::
diff --git a/Documentation/git-branch.txt b/Documentation/git-branch.txt
index 9767b2b483..bf5316ffa9 100644
--- a/Documentation/git-branch.txt
+++ b/Documentation/git-branch.txt
@@ -14,7 +14,7 @@ SYNOPSIS
[(--merged | --no-merged) [<commit>]]
[--contains [<commit>]] [--no-contains [<commit>]]
[--points-at <object>] [--format=<format>] [<pattern>...]
-'git branch' [--track | --no-track] [-l] [-f] <branchname> [<start-point>]
+'git branch' [--track | --no-track] [-f] <branchname> [<start-point>]
'git branch' (--set-upstream-to=<upstream> | -u <upstream>) [<branchname>]
'git branch' --unset-upstream [<branchname>]
'git branch' (-m | -M) [<oldbranch>] <newbranch>
@@ -100,8 +100,6 @@ OPTIONS
The negated form `--no-create-reflog` only overrides an earlier
`--create-reflog`, but currently does not negate the setting of
`core.logAllRefUpdates`.
-+
-The `-l` option is a deprecated synonym for `--create-reflog`.
-f::
--force::
@@ -156,14 +154,11 @@ This option is only applicable in non-verbose mode.
--all::
List both remote-tracking branches and local branches.
+-l::
--list::
List branches. With optional `<pattern>...`, e.g. `git
branch --list 'maint-*'`, list only the branches that match
the pattern(s).
-+
-This should not be confused with `git branch -l <branchname>`,
-which creates a branch named `<branchname>` with a reflog.
-See `--create-reflog` above for details.
-v::
-vv::
diff --git a/Documentation/git-format-patch.txt b/Documentation/git-format-patch.txt
index b41e1329a7..aba4c5febe 100644
--- a/Documentation/git-format-patch.txt
+++ b/Documentation/git-format-patch.txt
@@ -23,6 +23,8 @@ SYNOPSIS
[(--reroll-count|-v) <n>]
[--to=<email>] [--cc=<email>]
[--[no-]cover-letter] [--quiet] [--notes[=<ref>]]
+ [--interdiff=<previous>]
+ [--range-diff=<previous> [--creation-factor=<percent>]]
[--progress]
[<common diff options>]
[ <since> | <revision range> ]
@@ -228,6 +230,33 @@ feeding the result to `git send-email`.
containing the branch description, shortlog and the overall diffstat. You can
fill in a description in the file before sending it out.
+--interdiff=<previous>::
+ As a reviewer aid, insert an interdiff into the cover letter,
+ or as commentary of the lone patch of a 1-patch series, showing
+ the differences between the previous version of the patch series and
+ the series currently being formatted. `previous` is a single revision
+ naming the tip of the previous series which shares a common base with
+ the series being formatted (for example `git format-patch
+ --cover-letter --interdiff=feature/v1 -3 feature/v2`).
+
+--range-diff=<previous>::
+ As a reviewer aid, insert a range-diff (see linkgit:git-range-diff[1])
+ into the cover letter, or as commentary of the lone patch of a
+ 1-patch series, showing the differences between the previous
+ version of the patch series and the series currently being formatted.
+ `previous` can be a single revision naming the tip of the previous
+ series if it shares a common base with the series being formatted (for
+ example `git format-patch --cover-letter --range-diff=feature/v1 -3
+ feature/v2`), or a revision range if the two versions of the series are
+ disjoint (for example `git format-patch --cover-letter
+ --range-diff=feature/v1~3..feature/v1 -3 feature/v2`).
+
+--creation-factor=<percent>::
+ Used with `--range-diff`, tweak the heuristic which matches up commits
+ between the previous and current series of patches by adjusting the
+ creation/deletion cost fudge factor. See linkgit:git-range-diff[1]
+ for details.
+
--notes[=<ref>]::
Append the notes (see linkgit:git-notes[1]) for the commit
after the three-dash line.
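Putting the new options together, a v2 submission of a three-patch series might be prepared roughly like this (branch names and the creation factor are placeholders):
-------------------------------------------
git format-patch --cover-letter -v2 \
        --range-diff=feature/v1 --creation-factor=70 \
        -3 feature/v2
-------------------------------------------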
diff --git a/Documentation/git-interpret-trailers.txt b/Documentation/git-interpret-trailers.txt
index b8fafb1e8b..a5e8b36f62 100644
--- a/Documentation/git-interpret-trailers.txt
+++ b/Documentation/git-interpret-trailers.txt
@@ -56,8 +56,9 @@ least one Git-generated or user-configured trailer and consists of at
least 25% trailers.
The group must be preceded by one or more empty (or whitespace-only) lines.
The group must either be at the end of the message or be the last
-non-whitespace lines before a line that starts with '---'. Such three
-minus signs start the patch part of the message.
+non-whitespace lines before a line that starts with '---' (followed by a
+space or the end of the line). Such three minus signs start the patch
+part of the message. See also `--no-divider` below.
When reading trailers, there can be whitespaces after the
token, the separator and the value. There can also be whitespaces
@@ -125,6 +126,11 @@ OPTIONS
A convenience alias for `--only-trailers --only-input
--unfold`.
+--no-divider::
+ Do not treat `---` as the end of the commit message. Use this
+ when you know your input contains just the commit message itself
+ (and not an email or the output of `git format-patch`).
+
CONFIGURATION VARIABLES
-----------------------
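A minimal sketch of the new `--no-divider` option described above, feeding a bare commit message that may itself contain a '---' line:
-------------------------------------------
git log -1 --format=%B | git interpret-trailers --only-trailers --no-divider
-------------------------------------------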
diff --git a/Documentation/git-multi-pack-index.txt b/Documentation/git-multi-pack-index.txt
new file mode 100644
index 0000000000..1f97e79912
--- /dev/null
+++ b/Documentation/git-multi-pack-index.txt
@@ -0,0 +1,56 @@
+git-multi-pack-index(1)
+=======================
+
+NAME
+----
+git-multi-pack-index - Write and verify multi-pack-indexes
+
+
+SYNOPSIS
+--------
+[verse]
+'git multi-pack-index' [--object-dir=<dir>] <verb>
+
+DESCRIPTION
+-----------
+Write or verify a multi-pack-index (MIDX) file.
+
+OPTIONS
+-------
+
+--object-dir=<dir>::
+ Use given directory for the location of Git objects. We check
+ `<dir>/packs/multi-pack-index` for the current MIDX file, and
+ `<dir>/pack/multi-pack-index` for the current MIDX file, and
+ `<dir>/pack` for the pack-files to index.
+write::
+ When given as the verb, write a new MIDX file to
+ `<dir>/pack/multi-pack-index`.
+
+
+EXAMPLES
+--------
+
+* Write a MIDX file for the packfiles in the current .git folder.
++
+-----------------------------------------------
+$ git multi-pack-index write
+-----------------------------------------------
+
+* Write a MIDX file for the packfiles in an alternate object store.
++
+-----------------------------------------------
+$ git multi-pack-index --object-dir <alt> write
+-----------------------------------------------
+
+
+SEE ALSO
+--------
+See link:technical/multi-pack-index.html[The Multi-Pack-Index Design
+Document] and link:technical/pack-format.html[The Multi-Pack-Index
+Format] for more information on the multi-pack-index feature.
+
+
+GIT
+---
+Part of the linkgit:git[1] suite
diff --git a/Documentation/git-pack-objects.txt b/Documentation/git-pack-objects.txt
index d95b472d16..40c825c381 100644
--- a/Documentation/git-pack-objects.txt
+++ b/Documentation/git-pack-objects.txt
@@ -289,6 +289,103 @@ Unexpected missing object will raise an error.
--unpack-unreachable::
Keep unreachable objects in loose form. This implies `--revs`.
+--delta-islands::
+ Restrict delta matches based on "islands". See DELTA ISLANDS
+ below.
+
+
+DELTA ISLANDS
+-------------
+
+When possible, `pack-objects` tries to reuse existing on-disk deltas to
+avoid having to search for new ones on the fly. This is an important
+optimization for serving fetches, because it means the server can avoid
+inflating most objects at all and just send the bytes directly from
+disk. This optimization can't work when an object is stored as a delta
+against a base which the receiver does not have (and which we are not
+already sending). In that case the server "breaks" the delta and has to
+find a new one, which has a high CPU cost. Therefore it's important for
+performance that the set of objects in on-disk delta relationships match
+what a client would fetch.
+
+In a normal repository, this tends to work automatically. The objects
+are mostly reachable from the branches and tags, and that's what clients
+fetch. Any deltas we find on the server are likely to be between objects
+the client has or will have.
+
+But in some repository setups, you may have several related but separate
+groups of ref tips, with clients tending to fetch those groups
+independently. For example, imagine that you are hosting several "forks"
+of a repository in a single shared object store, and letting clients
+view them as separate repositories through `GIT_NAMESPACE` or separate
+repos using the alternates mechanism. A naive repack may find that the
+optimal delta for an object is against a base that is only found in
+another fork. But when a client fetches, they will not have the base
+object, and we'll have to find a new delta on the fly.
+
+A similar situation may exist if you have many refs outside of
+`refs/heads/` and `refs/tags/` that point to related objects (e.g.,
+`refs/pull` or `refs/changes` used by some hosting providers). By
+default, clients fetch only heads and tags, and deltas against objects
+found only in those other groups cannot be sent as-is.
+
+Delta islands solve this problem by allowing you to group your refs into
+distinct "islands". Pack-objects computes which objects are reachable
+from which islands, and refuses to make a delta from an object `A`
+against a base which is not present in all of `A`'s islands. This
+results in slightly larger packs (because we miss some delta
+opportunities), but guarantees that a fetch of one island will not have
+to recompute deltas on the fly due to crossing island boundaries.
+
+When repacking with delta islands the delta window tends to get
+clogged with candidates that are forbidden by the config. Repacking
+with a big --window helps (and doesn't take as long as it otherwise
+might because we can reject some object pairs based on islands before
+doing any computation on the content).
+
+Islands are configured via the `pack.island` option, which can be
+specified multiple times. Each value is a left-anchored regular
+expression matching refnames. For example:
+
+-------------------------------------------
+[pack]
+island = refs/heads/
+island = refs/tags/
+-------------------------------------------
+
+puts heads and tags into an island (whose name is the empty string; see
+below for more on naming). Any refs which do not match those regular
+expressions (e.g., `refs/pull/123`) are not in any island. Any object
+which is reachable only from `refs/pull/` (but not heads or tags) is
+therefore not a candidate to be used as a base for `refs/heads/`.
+
+Refs are grouped into islands based on their "names", and two regexes
+that produce the same name are considered to be in the same
+island. The names are computed from the regexes by concatenating any
+capture groups from the regex, with a '-' dash in between. (And if
+there are no capture groups, then the name is the empty string, as in
+the above example.) This allows you to create arbitrary numbers of
+islands. Only up to 14 such capture groups are supported though.
+
+For example, imagine you store the refs for each fork in
+`refs/virtual/ID`, where `ID` is a numeric identifier. You might then
+configure:
+
+-------------------------------------------
+[pack]
+island = refs/virtual/([0-9]+)/heads/
+island = refs/virtual/([0-9]+)/tags/
+island = refs/virtual/([0-9]+)/(pull)/
+-------------------------------------------
+
+That puts the heads and tags for each fork in their own island (named
+"1234" or similar), and the pull refs for each go into their own
+"1234-pull".
+
+Note that we pick a single island for each regex to go into, using "last
+one wins" ordering (which allows repo-specific config to take precedence
+over user-wide config, and so forth).
+
SEE ALSO
--------
linkgit:git-rev-list[1]
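Tying the DELTA ISLANDS section above together, a repack of such a shared fork store might look roughly like this (the island regexes, island name and window size are only examples):
-------------------------------------------
git config --add pack.island 'refs/virtual/([0-9]+)/heads/'
git config --add pack.island 'refs/virtual/([0-9]+)/tags/'
git config pack.islandCore 1234
git repack -a -d -f -i --window=250
-------------------------------------------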
diff --git a/Documentation/git-push.txt b/Documentation/git-push.txt
index 55277a9781..a5fc54aeab 100644
--- a/Documentation/git-push.txt
+++ b/Documentation/git-push.txt
@@ -74,22 +74,57 @@ without any `<refspec>` on the command line. Otherwise, missing
`:<dst>` means to update the same ref as the `<src>`.
+
The object referenced by <src> is used to update the <dst> reference
-on the remote side. By default this is only allowed if <dst> is not
-a tag (annotated or lightweight), and then only if it can fast-forward
-<dst>. By having the optional leading `+`, you can tell Git to update
-the <dst> ref even if it is not allowed by default (e.g., it is not a
-fast-forward.) This does *not* attempt to merge <src> into <dst>. See
-EXAMPLES below for details.
-+
-`tag <tag>` means the same as `refs/tags/<tag>:refs/tags/<tag>`.
-+
-Pushing an empty <src> allows you to delete the <dst> ref from
-the remote repository.
+on the remote side. Whether this is allowed depends on where in
+`refs/*` the <dst> reference lives, as described in detail below. In
+those sections "update" means any modification except deletes, which,
+as noted after the next few sections, are treated differently.
++
+The `refs/heads/*` namespace will only accept commit objects, and
+updates only if they can be fast-forwarded.
++
+The `refs/tags/*` namespace will accept any kind of object (as
+commits, trees and blobs can be tagged), and any updates to them will
+be rejected.
++
+It's possible to push any type of object to any namespace outside of
+`refs/{tags,heads}/*`. In the case of tags and commits, these will be
+treated as if they were the commits inside `refs/heads/*` for the
+purposes of whether the update is allowed.
++
+I.e. a fast-forward of commits and tags outside `refs/{tags,heads}/*`
+is allowed, even in cases where what's being fast-forwarded is not a
+commit, but a tag object which happens to point to a new commit which
+is a fast-forward of the commit the last tag (or commit) it's
+replacing. Replacing a tag with an entirely different tag is also
+allowed, if it points to the same commit, as well as pushing a peeled
+tag, i.e. pushing the commit that existing tag object points to, or a
+new tag object which an existing commit points to.
++
+Tree and blob objects outside of `refs/{tags,heads}/*` will be treated
+the same way as if they were inside `refs/tags/*`: any update of them
+will be rejected.
++
+All of the rules described above about what's not allowed as an update
+can be overridden by adding the optional leading `+` to a refspec
+(or using `--force` command line option). The only exception to this
+is that no amount of forcing will make the `refs/heads/*` namespace
+accept a non-commit object. Hooks and configuration can also override
+or amend these rules, see e.g. `receive.denyNonFastForwards` in
+linkgit:git-config[1] and `pre-receive` and `update` in
+linkgit:githooks[5].
++
+Pushing an empty <src> allows you to delete the <dst> ref from the
+remote repository. Deletions are always accepted without a leading `+`
+in the refspec (or `--force`), except when forbidden by configuration
+or hooks. See `receive.denyDeletes` in linkgit:git-config[1] and
+`pre-receive` and `update` in linkgit:githooks[5].
+
The special refspec `:` (or `+:` to allow non-fast-forward updates)
directs Git to push "matching" branches: for every branch that exists on
the local side, the remote side is updated if a branch of the same name
already exists on the remote side.
++
+`tag <tag>` means the same as `refs/tags/<tag>:refs/tags/<tag>`.
--all::
Push all branches (i.e. refs under `refs/heads/`); cannot be
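For instance, under the rules described above, moving an existing tag needs force while deleting it does not (tag and remote names are placeholders):
-------------------------------------------
# rejected if refs/tags/v1.0 already exists on the remote and differs
git push origin v1.0

# an explicit force (or a leading "+" in the refspec) overrides that
git push --force origin v1.0

# deletions need no force unless forbidden by hooks or configuration
git push origin :refs/tags/v1.0
-------------------------------------------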
diff --git a/Documentation/git-repack.txt b/Documentation/git-repack.txt
index d056250968..aa0cc8bd44 100644
--- a/Documentation/git-repack.txt
+++ b/Documentation/git-repack.txt
@@ -160,6 +160,11 @@ depth is 4095.
being removed. In addition, any unreachable loose objects will
be packed (and their loose counterparts removed).
+-i::
+--delta-islands::
+ Pass the `--delta-islands` option to `git-pack-objects`, see
+ linkgit:git-pack-objects[1].
+
Configuration
-------------
diff --git a/Documentation/git-rerere.txt b/Documentation/git-rerere.txt
index 031f31fa47..df310d2a58 100644
--- a/Documentation/git-rerere.txt
+++ b/Documentation/git-rerere.txt
@@ -211,6 +211,12 @@ would conflict the same way as the test merge you resolved earlier.
'git rerere' will be run by 'git rebase' to help you resolve this
conflict.
+[NOTE] 'git rerere' relies on the conflict markers in the file to
+detect the conflict. If the file already contains lines that look the
+same as lines with conflict markers, 'git rerere' may fail to record a
+conflict resolution. To work around this, the `conflict-marker-size`
+setting in linkgit:gitattributes[5] can be used.
+
GIT
---
Part of the linkgit:git[1] suite
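The .gitattributes hunk at the top of this patch does exactly that for this project's own documentation; a repository can do the same for any file that legitimately contains marker-like lines, e.g.:
-------------------------------------------
# lengthen the markers rerere looks for in this file
echo 'Documentation/user-manual.txt conflict-marker-size=32' >>.gitattributes
-------------------------------------------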
diff --git a/Documentation/git-worktree.txt b/Documentation/git-worktree.txt
index 29a5b7e252..e2ee9fc21b 100644
--- a/Documentation/git-worktree.txt
+++ b/Documentation/git-worktree.txt
@@ -120,8 +120,16 @@ OPTIONS
--force::
By default, `add` refuses to create a new working tree when
`<commit-ish>` is a branch name and is already checked out by
- another working tree and `remove` refuses to remove an unclean
- working tree. This option overrides these safeguards.
+ another working tree, or if `<path>` is already assigned to some
+ working tree but is missing (for instance, if `<path>` was deleted
+ manually). This option overrides these safeguards. To add a missing but
+ locked working tree path, specify `--force` twice.
++
+`move` refuses to move a locked working tree unless `--force` is specified
+twice.
++
+`remove` refuses to remove an unclean working tree unless `--force` is used.
+To remove a locked working tree, specify `--force` twice.
-b <new-branch>::
-B <new-branch>::
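For example, under the clarified rules a locked working tree is only removed or moved when `--force` is given twice (paths are placeholders):
-------------------------------------------
git worktree remove --force --force ../locked-wt
git worktree move --force --force ../locked-wt ../elsewhere
-------------------------------------------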
diff --git a/Documentation/git.txt b/Documentation/git.txt
index dba7f0c18e..74a9d7edb4 100644
--- a/Documentation/git.txt
+++ b/Documentation/git.txt
@@ -599,8 +599,8 @@ trace messages into this file descriptor.
+
Alternatively, if the variable is set to an absolute path
(starting with a '/' character), Git will interpret this
-as a file path and will try to write the trace messages
-into it.
+as a file path and will try to append the trace messages
+to it.
+
Unsetting the variable, or setting it to empty, "0" or
"false" (case insensitive) disables trace messages.
diff --git a/Documentation/gitrevisions.txt b/Documentation/gitrevisions.txt
index 1f6cceaefb..d407b7dee1 100644
--- a/Documentation/gitrevisions.txt
+++ b/Documentation/gitrevisions.txt
@@ -19,9 +19,10 @@ walk the revision graph (such as linkgit:git-log[1]), all commits which are
reachable from that commit. For commands that walk the revision graph one can
also specify a range of revisions explicitly.
-In addition, some Git commands (such as linkgit:git-show[1]) also take
-revision parameters which denote other objects than commits, e.g. blobs
-("files") or trees ("directories of files").
+In addition, some Git commands (such as linkgit:git-show[1] and
+linkgit:git-push[1]) can also take revision parameters which denote
+other objects than commits, e.g. blobs ("files") or trees
+("directories of files").
include::revisions.txt[]
diff --git a/Documentation/pull-fetch-param.txt b/Documentation/pull-fetch-param.txt
index f1fb08dc68..7d3a60f5b9 100644
--- a/Documentation/pull-fetch-param.txt
+++ b/Documentation/pull-fetch-param.txt
@@ -33,11 +33,40 @@ name.
it requests fetching everything up to the given tag.
+
The remote ref that matches <src>
-is fetched, and if <dst> is not an empty string, the local
-ref that matches it is fast-forwarded using <src>.
-If the optional plus `+` is used, the local ref
-is updated even if it does not result in a fast-forward
-update.
+is fetched, and if <dst> is not an empty string, an attempt
+is made to update the local ref that matches it.
++
+Whether that update is allowed without `--force` depends on the ref
+namespace it's being fetched to, the type of object being fetched, and
+whether the update is considered to be a fast-forward. Generally, the
+same rules apply for fetching as when pushing; see the `<refspec>...`
+section of linkgit:git-push[1] for what those are. Exceptions to those
+rules particular to 'git fetch' are noted below.
++
+Until Git version 2.20, and unlike when pushing with
+linkgit:git-push[1], any updates to `refs/tags/*` would be accepted
+without `+` in the refspec (or `--force`). When fetching, we promiscuously
+considered all tag updates from a remote to be forced fetches. Since
+Git version 2.20, fetching to update `refs/tags/*` works the same way
+as when pushing. I.e. any updates will be rejected without `+` in the
+refspec (or `--force`).
++
+Unlike when pushing with linkgit:git-push[1], any updates outside of
+`refs/{tags,heads}/*` will be accepted without `+` in the refspec (or
+`--force`), whether that's swapping e.g. a tree object for a blob, or
+a commit for another commit that doesn't have the previous commit as
+an ancestor, etc.
++
+Unlike when pushing with linkgit:git-push[1], there is no
+configuration which'll amend these rules, and nothing like a
+`pre-fetch` hook analogous to the `pre-receive` hook.
++
+As with pushing with linkgit:git-push[1], all of the rules described
+above about what's not allowed as an update can be overridden by
+adding the optional leading `+` to a refspec (or using the `--force`
+command line option). The only exception to this is that no amount of
+forcing will make the `refs/heads/*` namespace accept a non-commit
+object.
+
[NOTE]
When the remote branch you want to fetch is known to
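Under the 2.20 behaviour described above, re-fetching a tag that has moved on the remote needs an explicit force, for example (the tag name is a placeholder):
-------------------------------------------
# rejected if refs/tags/v1.0 already exists locally and differs
git fetch origin refs/tags/v1.0:refs/tags/v1.0

# either of these forces the update through
git fetch --force origin refs/tags/v1.0:refs/tags/v1.0
git fetch origin +refs/tags/v1.0:refs/tags/v1.0
-------------------------------------------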
diff --git a/Documentation/technical/multi-pack-index.txt b/Documentation/technical/multi-pack-index.txt
new file mode 100644
index 0000000000..d7e57639f7
--- /dev/null
+++ b/Documentation/technical/multi-pack-index.txt
@@ -0,0 +1,109 @@
+Multi-Pack-Index (MIDX) Design Notes
+====================================
+
+The Git object directory contains a 'pack' directory containing
+packfiles (with suffix ".pack") and pack-indexes (with suffix
+".idx"). The pack-indexes provide a way to lookup objects and
+navigate to their offset within the pack, but these must come
+in pairs with the packfiles. This pairing depends on the file
+names, as the pack-index differs only in suffix with its pack-
+file. While the pack-indexes provide fast lookup per packfile,
+this performance degrades as the number of packfiles increases,
+because abbreviations need to inspect every packfile and we are
+more likely to have a miss on our most-recently-used packfile.
+For some large repositories, repacking into a single packfile
+is not feasible due to storage space or excessive repack times.
+
+The multi-pack-index (MIDX for short) stores a list of objects
+and their offsets into multiple packfiles. It contains:
+
+- A list of packfile names.
+- A sorted list of object IDs.
+- A list of metadata for the ith object ID including:
+ - A value j referring to the jth packfile.
+ - An offset within the jth packfile for the object.
+- If large offsets are required, we use another list of large
+ offsets similar to version 2 pack-indexes.
+
+Thus, we can provide O(log N) lookup time for any number
+of packfiles.
+
+Design Details
+--------------
+
+- The MIDX is stored in a file named 'multi-pack-index' in the
+ .git/objects/pack directory. This could be stored in the pack
+ directory of an alternate. It refers only to packfiles in that
+ same directory.
+
+- The core.multiPackIndex config setting must be on to consume MIDX files.
+
+- The file format includes parameters for the object ID hash
+ function, so a future change of hash algorithm does not require
+ a change in format.
+
+- The MIDX keeps only one record per object ID. If an object appears
+ in multiple packfiles, then the MIDX selects the copy in the most-
+ recently modified packfile.
+
+- If there exist packfiles in the pack directory not registered in
+ the MIDX, then those packfiles are loaded into the `packed_git`
+ list and `packed_git_mru` cache.
+
+- The pack-indexes (.idx files) remain in the pack directory so we
+ can delete the MIDX file, set core.multiPackIndex to false, or downgrade
+ without any loss of information.
+
+- The MIDX file format uses a chunk-based approach (similar to the
+ commit-graph file) that allows optional data to be added.
+
+Future Work
+-----------
+
+- Add a 'verify' subcommand to the 'git multi-pack-index' builtin to verify the
+ contents of the multi-pack-index file match the offsets listed in
+ the corresponding pack-indexes.
+
+- The multi-pack-index allows many packfiles, especially in a context
+ where repacking is expensive (such as a very large repo), or
+ unexpected maintenance time is unacceptable (such as a high-demand
+ build machine). However, the multi-pack-index needs to be rewritten
+ in full every time. We can extend the format to be incremental, so
+ writes are fast. By storing a small "tip" multi-pack-index that
+ points to large "base" MIDX files, we can keep writes fast while
+ still reducing the number of binary searches required for object
+ lookups.
+
+- The reachability bitmap is currently paired directly with a single
+ packfile, using the pack-order as the object order to hopefully
+ compress the bitmaps well using run-length encoding. This could be
+ extended to pair a reachability bitmap with a multi-pack-index. If
+ the multi-pack-index is extended to store a "stable object order"
+ (a function Order(hash) = integer that is constant for a given hash,
+ even as the multi-pack-index is updated) then a reachability bitmap
+ could point to a multi-pack-index and be updated independently.
+
+- Packfiles can be marked as "special" using empty files that share
+ the initial name but replace ".pack" with ".keep" or ".promisor".
+ We can add an optional chunk of data to the multi-pack-index that
+ records flags of information about the packfiles. This allows new
+ states, such as 'repacked' or 'redeltified', that can help with
+ pack maintenance in a multi-pack environment. It may also be
+ helpful to organize packfiles by object type (commit, tree, blob,
+ etc.) and use this metadata to help that maintenance.
+
+- The partial clone feature records special "promisor" packs that
+ may point to objects that are not stored locally, but available
+ on request to a server. The multi-pack-index does not currently
+ track these promisor packs.
+
+Related Links
+-------------
+[0] https://bugs.chromium.org/p/git/issues/detail?id=6
+ Chromium work item for: Multi-Pack Index (MIDX)
+
+[1] https://public-inbox.org/git/20180107181459.222909-1-dstolee@microsoft.com/
+ An earlier RFC for the multi-pack-index feature
+
+[2] https://public-inbox.org/git/alpine.DEB.2.20.1803091557510.23109@alexmv-linux/
+ Git Merge 2018 Contributor's summit notes (includes discussion of MIDX)
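A quick way to try the feature described above in an ordinary repository (with `core.multiPackIndex` enabled as in the config hunk earlier in this patch):
-------------------------------------------
git multi-pack-index write
ls .git/objects/pack/multi-pack-index
-------------------------------------------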
diff --git a/Documentation/technical/pack-format.txt b/Documentation/technical/pack-format.txt
index 70a99fd142..cab5bdd2ff 100644
--- a/Documentation/technical/pack-format.txt
+++ b/Documentation/technical/pack-format.txt
@@ -252,3 +252,80 @@ Pack file entry: <+
corresponding packfile.
20-byte SHA-1-checksum of all of the above.
+
+== multi-pack-index (MIDX) files have the following format:
+
+The multi-pack-index files refer to multiple pack-files and loose objects.
+
+In order to allow extensions that add extra data to the MIDX, we organize
+the body into "chunks" and provide a lookup table at the beginning of the
+body. The header includes certain length values, such as the number of packs,
+the number of base MIDX files, hash lengths and types.
+
+All 4-byte numbers are in network order.
+
+HEADER:
+
+ 4-byte signature:
+ The signature is: {'M', 'I', 'D', 'X'}
+
+ 1-byte version number:
+ Git only writes or recognizes version 1.
+
+ 1-byte Object Id Version
+ Git only writes or recognizes version 1 (SHA1).
+
+ 1-byte number of "chunks"
+
+ 1-byte number of base multi-pack-index files:
+ This value is currently always zero.
+
+ 4-byte number of pack files
+
+CHUNK LOOKUP:
+
+ (C + 1) * 12 bytes providing the chunk offsets:
+ First 4 bytes describe chunk id. Value 0 is a terminating label.
+ Other 8 bytes provide offset in current file for chunk to start.
+ (Chunks are provided in file-order, so you can infer the length
+ using the next chunk position if necessary.)
+
+ The remaining data in the body is described one chunk at a time, and
+ these chunks may be given in any order. Chunks are required unless
+ otherwise specified.
+
+CHUNK DATA:
+
+ Packfile Names (ID: {'P', 'N', 'A', 'M'})
+ Stores the packfile names as concatenated, null-terminated strings.
+ Packfiles must be listed in lexicographic order for fast lookups by
+ name. This is the only chunk not guaranteed to be a multiple of four
+ bytes in length, so should be the last chunk for alignment reasons.
+
+ OID Fanout (ID: {'O', 'I', 'D', 'F'})
+ The ith entry, F[i], stores the number of OIDs with first
+ byte at most i. Thus F[255] stores the total
+ number of objects.
+
+ OID Lookup (ID: {'O', 'I', 'D', 'L'})
+ The OIDs for all objects in the MIDX are stored in lexicographic
+ order in this chunk.
+
+ Object Offsets (ID: {'O', 'O', 'F', 'F'})
+ Stores two 4-byte values for every object.
+ 1: The pack-int-id for the pack storing this object.
+ 2: The offset within the pack.
+ If all offsets are less than 2^31, then the large offset chunk
+ will not exist and offsets are stored as in IDX v1.
+ If there is at least one offset value larger than 2^32-1, then
+ the large offset chunk must exist. If the large offset chunk
+ exists and the 31st bit is on, then removing that bit reveals
+ the row in the large offsets containing the 8-byte offset of
+ this object.
+
+ [Optional] Object Large Offsets (ID: {'L', 'O', 'F', 'F'})
+ 8-byte offsets into large packfiles.
+
+TRAILER:
+
+ 20-byte SHA1-checksum of the above contents.
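The fixed 12-byte header described above can be eyeballed directly on a freshly written file, for instance:
-------------------------------------------
# bytes 0-3: 'M' 'I' 'D' 'X'; byte 4: version; byte 5: OID version;
# byte 6: chunk count; byte 7: base MIDX count; bytes 8-11: pack count
od -A d -t x1 -N 12 .git/objects/pack/multi-pack-index
-------------------------------------------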
diff --git a/Documentation/technical/rerere.txt b/Documentation/technical/rerere.txt
new file mode 100644
index 0000000000..aa22d7ace8
--- /dev/null
+++ b/Documentation/technical/rerere.txt
@@ -0,0 +1,186 @@
+Rerere
+======
+
+This document describes the rerere logic.
+
+Conflict normalization
+----------------------
+
+To ensure recorded conflict resolutions can be looked up in the rerere
+database, even when branches are merged in a different order, when
+different branches that result in the same conflict are merged, or
+when different conflict style settings are used, rerere normalizes the
+conflicts before writing them to the rerere database.
+
+Different conflict styles and branch names are normalized by stripping
+the labels from the conflict markers, and removing the common ancestor
+version from the `diff3` conflict style. Branches that are merged
+in different order are normalized by sorting the conflict hunks. More
+on each of those steps in the following sections.
+
+Once these two normalization operations are applied, a conflict ID is
+calculated based on the normalized conflict, which is later used by
+rerere to look up the conflict in the rerere database.
+
+Removing the common ancestor version
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Say we have three branches AB, AC and AC2. The common ancestor of
+these branches has a file with a line containing the string "A" (for
+brevity this is called "line A" in the rest of the document). In
+branch AB this line is changed to "B", in AC, this line is changed to
+"C", and branch AC2 is forked off of AC, after the line was changed to
+"C".
+
+Forking a branch ABAC off of branch AB and then merging AC into it, we
+get a conflict like the following:
+
+ <<<<<<< HEAD
+ B
+ =======
+ C
+ >>>>>>> AC
+
+Doing the analogous with AC2 (forking a branch ABAC2 off of branch AB
+and then merging branch AC2 into it), using the diff3 conflict style,
+we get a conflict like the following:
+
+ <<<<<<< HEAD
+ B
+ ||||||| merged common ancestors
+ A
+ =======
+ C
+ >>>>>>> AC2
+
+By resolving this conflict, to leave line D, the user declares:
+
+ After examining what branches AB and AC did, I believe that making
+ line A into line D is the best thing to do that is compatible with
+ what AB and AC wanted to do.
+
+As branch AC2 refers to the same commit as AC, the above implies that
+this is also compatible with what AB and AC2 wanted to do.
+
+By extension, this means that rerere should recognize that the above
+conflicts are the same. To do this, the labels on the conflict
+markers are stripped, and the common ancestor version is removed. The above
+examples would both result in the following normalized conflict:
+
+ <<<<<<<
+ B
+ =======
+ C
+ >>>>>>>
+
+Sorting hunks
+~~~~~~~~~~~~~
+
+As before, let's imagine that a common ancestor had a file with line A
+in its early part, and line X in its late part. And then four branches
+are forked that do these things:
+
+ - AB: changes A to B
+ - AC: changes A to C
+ - XY: changes X to Y
+ - XZ: changes X to Z
+
+Now, forking a branch ABAC off of branch AB and then merging AC into
+it, and forking a branch ACAB off of branch AC and then merging AB
+into it, would yield the conflict in a different order. The former
+would say "A became B or C, what now?" while the latter would say "A
+became C or B, what now?"
+
+As a reminder, the act of merging AC into ABAC and resolving the
+conflict to leave line D means that the user declares:
+
+ After examining what branches AB and AC did, I believe that
+ making line A into line D is the best thing to do that is
+ compatible with what AB and AC wanted to do.
+
+So the conflict we would see when merging AB into ACAB should be
+resolved the same way---it is the resolution that is in line with that
+declaration.
+
+Imagine that similarly previously a branch XYXZ was forked from XY,
+and XZ was merged into it, and resolved "X became Y or Z" into "X
+became W".
+
+Now, if a branch ABXY was forked from AB and then merged XY, then ABXY
+would have line B in its early part and line Y in its later part.
+Such a merge would be quite clean. We can construct 4 combinations
+using these four branches ((AB, AC) x (XY, XZ)).
+
+Merging ABXY and ACXZ would make "an early A became B or C, a late X
+became Y or Z" conflict, while merging ACXY and ABXZ would make "an
+early A became C or B, a late X became Y or Z". We can see there are
+4 combinations of ("B or C", "C or B") x ("Y or Z", "Z or Y").
+
+By sorting, the conflict is given its canonical name, namely, "an
+early part became B or C, a late part became Y or Z", and whenever
+any of these four patterns appears, we can get to the same conflict
+and resolution that we saw earlier.
+
+Without the sorting, we'd have to somehow find a previous resolution
+from combinatorial explosion.
+
+Conflict ID calculation
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Once the conflict normalization is done, the conflict ID is calculated
+as the sha1 hash of the conflict hunks appended to each other,
+separated by <NUL> characters. The conflict markers are stripped out
+before the sha1 is calculated. So in the example above, where we
+merge branch AC which changes line A to line C, into branch AB, which
+changes line A to line B, the conflict ID would be
+SHA1('B<NUL>C<NUL>').
+
+If there are multiple conflicts in one file, the sha1 is calculated
+the same way with all hunks appended to each other, in the order in
+which they appear in the file, separated by a <NUL> character.
+
+Nested conflicts
+~~~~~~~~~~~~~~~~
+
+Nested conflicts are handled very similarly to "simple" conflicts.
+Similar to simple conflicts, the conflict is first normalized by
+stripping the labels from conflict markers, stripping the common ancestor
+version, and then sorting the conflict hunks, both for the outer and the
+inner conflict. This is done recursively, so any number of nested
+conflicts can be handled.
+
+Note that this only works for conflict markers that "cleanly nest". If
+there are any unmatched conflict markers, rerere will fail to handle
+the conflict and will not record a conflict resolution.
+
+The only difference is in how the conflict ID is calculated. For the
+inner conflict, the conflict markers themselves are not stripped out
+before calculating the sha1.
+
+Say we have the following conflict for example:
+
+ <<<<<<< HEAD
+ 1
+ =======
+ <<<<<<< HEAD
+ 3
+ =======
+ 2
+ >>>>>>> branch-2
+ >>>>>>> branch-3~
+
+After stripping out the labels of the conflict markers, and sorting
+the hunks, the conflict would look as follows:
+
+ <<<<<<<
+ 1
+ =======
+ <<<<<<<
+ 2
+ =======
+ 3
+ >>>>>>>
+ >>>>>>>
+
+and finally the conflict ID would be calculated as:
+`sha1('1<NUL><<<<<<<\n3\n=======\n2\n>>>>>>><NUL>')`
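The hashing scheme for the first (simple) example can be reproduced approximately from the shell; this only illustrates SHA1('B<NUL>C<NUL>') as described above, not the on-disk rr-cache layout:
-------------------------------------------
printf 'B\0C\0' | sha1sum
-------------------------------------------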
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index e9dc8f7a01..498fce8b64 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,7 +1,7 @@
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v2.19.0
+DEF_VER=v2.19.GIT
LF='
'
diff --git a/Makefile b/Makefile
index 5a969f5830..13e1c52478 100644
--- a/Makefile
+++ b/Makefile
@@ -722,7 +722,9 @@ TEST_BUILTINS_OBJS += test-mktemp.o
TEST_BUILTINS_OBJS += test-online-cpus.o
TEST_BUILTINS_OBJS += test-path-utils.o
TEST_BUILTINS_OBJS += test-prio-queue.o
+TEST_BUILTINS_OBJS += test-reach.o
TEST_BUILTINS_OBJS += test-read-cache.o
+TEST_BUILTINS_OBJS += test-read-midx.o
TEST_BUILTINS_OBJS += test-ref-store.o
TEST_BUILTINS_OBJS += test-regex.o
TEST_BUILTINS_OBJS += test-repository.o
@@ -738,6 +740,7 @@ TEST_BUILTINS_OBJS += test-submodule-config.o
TEST_BUILTINS_OBJS += test-subprocess.o
TEST_BUILTINS_OBJS += test-urlmatch-normalization.o
TEST_BUILTINS_OBJS += test-wildmatch.o
+TEST_BUILTINS_OBJS += test-windows-named-pipe.o
TEST_BUILTINS_OBJS += test-write-cache.o
TEST_PROGRAMS_NEED_X += test-dump-fsmonitor
@@ -835,6 +838,7 @@ LIB_OBJS += column.o
LIB_OBJS += combine-diff.o
LIB_OBJS += commit.o
LIB_OBJS += commit-graph.o
+LIB_OBJS += commit-reach.o
LIB_OBJS += compat/obstack.o
LIB_OBJS += compat/terminal.o
LIB_OBJS += config.o
@@ -847,6 +851,7 @@ LIB_OBJS += csum-file.o
LIB_OBJS += ctype.o
LIB_OBJS += date.o
LIB_OBJS += decorate.o
+LIB_OBJS += delta-islands.o
LIB_OBJS += diffcore-break.o
LIB_OBJS += diffcore-delta.o
LIB_OBJS += diffcore-order.o
@@ -880,6 +885,7 @@ LIB_OBJS += linear-assignment.o
LIB_OBJS += help.o
LIB_OBJS += hex.o
LIB_OBJS += ident.o
+LIB_OBJS += interdiff.o
LIB_OBJS += json-writer.o
LIB_OBJS += kwset.o
LIB_OBJS += levenshtein.o
@@ -900,6 +906,7 @@ LIB_OBJS += merge.o
LIB_OBJS += merge-blobs.o
LIB_OBJS += merge-recursive.o
LIB_OBJS += mergesort.o
+LIB_OBJS += midx.o
LIB_OBJS += name-hash.o
LIB_OBJS += negotiator/default.o
LIB_OBJS += negotiator/skipping.o
@@ -1060,6 +1067,7 @@ BUILTIN_OBJS += builtin/merge-recursive.o
BUILTIN_OBJS += builtin/merge-tree.o
BUILTIN_OBJS += builtin/mktag.o
BUILTIN_OBJS += builtin/mktree.o
+BUILTIN_OBJS += builtin/multi-pack-index.o
BUILTIN_OBJS += builtin/mv.o
BUILTIN_OBJS += builtin/name-rev.o
BUILTIN_OBJS += builtin/notes.o
diff --git a/RelNotes b/RelNotes
index 5d139ba7f1..46e159c68d 120000..100644
--- a/RelNotes
+++ b/RelNotes
@@ -1 +1,112 @@
-Documentation/RelNotes/2.19.0.txt
\ No newline at end of file
+Git Release Notes
+=================
+
+Backward Compatibility Notes
+----------------------------
+
+ * "git branch -l <foo>" used to be a way to ask a reflog to be
+ created while creating a new branch, but that is no longer the
+ case. It is a short-hand for "git branch --list <foo>" now.
+
+ * "git push" into refs/tags/* hierarchy is rejected without getting
+ forced, but "git fetch" (misguidedly) used the "fast forwarding"
+ rule used for the refs/heads/* hierarchy; this has been corrected,
+ which means some fetches of tags that did not fail with older
+ versions of Git will fail without "--force" with this version.
+
+
+Updates since v2.19
+-------------------
+
+UI, Workflows & Features
+
+ * Running "git clone" against a project that contains two files with
+ pathnames that differ only in case on a case-insensitive
+ filesystem would result in one of the files being lost because the
+ underlying filesystem is incapable of holding both at the same
+ time. An attempt is made to detect such a case and warn.
+
+ * "git checkout -b newbranch [HEAD]" should not have to do as much as
+ checking out a commit different from HEAD. An attempt is made to
+ optimize this special case.
+
+ * "git rev-list --stdin </dev/null" used to be an error; it now shows
+ no output without an error. "git rev-list --stdin --default HEAD"
+ still falls back to the given default when nothing is given on the
+ standard input.
+
+ * Lift code from GitHub to restrict delta computation so that an
+ object that exists in one fork is not made into a delta against
+ another object that does not appear in the same forked repository.
+
+ * "git format-patch" learned new "--interdiff" and "--range-diff"
+ options to explain the difference between this version and the
+ previous attempt in the cover letter (or after the three-dashes as
+ a comment).
+
+ * "git mailinfo" used in "git am" learned to make a best-effort
+ recovery of a patch corrupted by an MUA that sends text/plain with
+ the format=flowed option.
+ (merge 3aa4d81f88 rs/mailinfo-format-flowed later to maint).
+
+ * The rules used by "git push" and "git fetch" to determine if a ref
+ can or cannot be updated were inconsistent; specifically, fetching
+ to update existing tags was allowed even though tags are supposed
+ to be unmoving anchoring points. "git fetch" was taught to forbid
+ updates to existing tags without the "--force" option.
+
+
+Performance, Internal Implementation, Development Support etc.
+
+ * When there are too many packfiles in a repository (which is not
+ recommended), looking up an object in these would require
+ consulting many pack .idx files; a new mechanism to have a single
+ file that consolidates all of these .idx files is introduced.
+
+ * "git submodule update" is getting rewritten piece-by-piece into C.
+
+ * The code for computing history reachability has been shuffled
+ around, has gained a bunch of new tests to cover it, and is now
+ being improved.
+
+ * The unpack_trees() API used in checking out a branch and merging
+ walks one or more trees along with the index. When the cache-tree
+ in the index tells us that we are walking a tree whose flattened
+ contents are already known (i.e. they match a span in the index),
+ the walk can be optimized, because linearly scanning that span is
+ much more efficient than recursively opening tree objects and
+ listing their entries. This optimization has now been done.
+
+ * When creating a thin pack, which allows objects to be made into a
+ delta against another object that is not in the resulting pack but
+ is known to be present on the receiving end, the code learned to
+ take advantage of the reachability bitmap; this allows the server
+ to send a delta against a base beyond the "boundary" commit.
+
+ * A spatch transformation replacing boolean uses of !hashcmp() with
+ the newly introduced oideq() has been added and applied, to regain
+ performance lost to multiple-hash-algorithm support (sketch below).
+
+ * Fix a bug in which the same path could be registered under multiple
+ worktree entries if the path was missing (for instance, was removed
+ manually). Also, as a convenience, expand the number of cases in
+ which --force is applicable.
+
+
+Fixes since v2.19
+-----------------
+
+ * "git interpret-trailers" and its underlying machinery had a buggy
+ code that attempted to ignore patch text after commit log message,
+ which triggered in various codepaths that will always get the log
+ message alone and never get such an input.
+ (merge 66e83d9b41 jk/trailer-fixes later to maint).
+
+ * Malformed or crafted data in a packstream could make our code
+ attempt to read or write past the allocated buffer and abort,
+ instead of reporting an error; this has been fixed.
+
+ * Code cleanup, docfix, build fix, etc.
+ (merge 96a7501aad ts/doc-build-manpage-xsl-quietly later to maint).
+ (merge b9b07efdb2 tg/conflict-marker-size later to maint).
+ (merge fa0aeea770 sg/doc-trace-appends later to maint).
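
Nearly every hunk below contains one of these !oidcmp()/!hashcmp() conversions,
so here is a minimal sketch of the idiom for reference; the two wrapper
functions are purely illustrative, only oideq() and hasheq() come from this
series:

	#include "cache.h"	/* struct object_id, oidcmp/oideq, hashcmp/hasheq */

	/* illustrative wrappers, not part of the patch */
	static int same_oid(const struct object_id *a, const struct object_id *b)
	{
		return oideq(a, b);	/* was: !oidcmp(a, b) */
	}

	static int different_hash(const unsigned char *a, const unsigned char *b)
	{
		return !hasheq(a, b);	/* was: hashcmp(a, b), used as a boolean */
	}

Callers that only need equality use the *eq() forms; the three-way *cmp()
forms remain where an actual ordering is needed.
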
diff --git a/archive.c b/archive.c
index 0a07b140fe..c1870105eb 100644
--- a/archive.c
+++ b/archive.c
@@ -110,7 +110,8 @@ static const struct attr_check *get_archive_attrs(struct index_state *istate,
static struct attr_check *check;
if (!check)
check = attr_check_initl("export-ignore", "export-subst", NULL);
- return git_check_attr(istate, path, check) ? NULL : check;
+ git_check_attr(istate, path, check);
+ return check;
}
static int check_attr_export_ignore(const struct attr_check *check)
diff --git a/attr.c b/attr.c
index 98e4953f6e..60d284796d 100644
--- a/attr.c
+++ b/attr.c
@@ -1143,9 +1143,9 @@ static void collect_some_attrs(const struct index_state *istate,
fill(path, pathlen, basename_offset, check->stack, check->all_attrs, rem);
}
-int git_check_attr(const struct index_state *istate,
- const char *path,
- struct attr_check *check)
+void git_check_attr(const struct index_state *istate,
+ const char *path,
+ struct attr_check *check)
{
int i;
@@ -1158,8 +1158,6 @@ int git_check_attr(const struct index_state *istate,
value = ATTR__UNSET;
check->items[i].value = value;
}
-
- return 0;
}
void git_all_attrs(const struct index_state *istate,
diff --git a/attr.h b/attr.h
index 2be86db36e..b0378bfe5f 100644
--- a/attr.h
+++ b/attr.h
@@ -63,8 +63,8 @@ void attr_check_free(struct attr_check *check);
*/
const char *git_attr_name(const struct git_attr *);
-int git_check_attr(const struct index_state *istate,
- const char *path, struct attr_check *check);
+void git_check_attr(const struct index_state *istate,
+ const char *path, struct attr_check *check);
/*
* Retrieve all attributes that apply to the specified path.
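
Since git_check_attr() can no longer fail, its callers (archive.c and
builtin/check-attr.c above, builtin/pack-objects.c further down) simply drop
their error handling. A minimal sketch of the new calling convention; the
attribute name and the helper function are examples only:

	#include "cache.h"
	#include "attr.h"

	static int export_ignored(const struct index_state *istate, const char *path)
	{
		static struct attr_check *check;

		if (!check)
			check = attr_check_initl("export-ignore", NULL);

		git_check_attr(istate, path, check);	/* returns void now */
		return ATTR_TRUE(check->items[0].value);
	}
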
diff --git a/bisect.c b/bisect.c
index e1275ba79e..e8b17cf7e1 100644
--- a/bisect.c
+++ b/bisect.c
@@ -13,6 +13,8 @@
#include "sha1-array.h"
#include "argv-array.h"
#include "commit-slab.h"
+#include "commit-reach.h"
+#include "object-store.h"
static struct oid_array good_revs;
static struct oid_array skipped_revs;
@@ -120,14 +122,14 @@ static inline int halfway(struct commit_list *p, int nr)
}
}
-#if !DEBUG_BISECT
-#define show_list(a,b,c,d) do { ; } while (0)
-#else
static void show_list(const char *debug, int counted, int nr,
struct commit_list *list)
{
struct commit_list *p;
+ if (!DEBUG_BISECT)
+ return;
+
fprintf(stderr, "%s (%d/%d)\n", debug, counted, nr);
for (p = list; p; p = p->next) {
@@ -145,7 +147,7 @@ static void show_list(const char *debug, int counted, int nr,
(flags & TREESAME) ? ' ' : 'T',
(flags & UNINTERESTING) ? 'U' : ' ',
(flags & COUNTED) ? 'C' : ' ');
- if (commit->util)
+ if (*commit_weight_at(&commit_weight, p->item))
fprintf(stderr, "%3d", weight(p));
else
fprintf(stderr, "---");
@@ -160,7 +162,6 @@ static void show_list(const char *debug, int counted, int nr,
fprintf(stderr, "\n");
}
}
-#endif /* DEBUG_BISECT */
static struct commit_list *best_bisection(struct commit_list *list, int nr)
{
@@ -595,7 +596,7 @@ static struct commit_list *skip_away(struct commit_list *list, int count)
for (i = 0; cur; cur = cur->next, i++) {
if (i == index) {
- if (oidcmp(&cur->item->object.oid, current_bad_oid))
+ if (!oideq(&cur->item->object.oid, current_bad_oid))
return cur;
if (previous)
return previous;
@@ -807,7 +808,7 @@ static void check_merge_bases(int rev_nr, struct commit **rev, int no_checkout)
for (; result; result = result->next) {
const struct object_id *mb = &result->item->object.oid;
- if (!oidcmp(mb, current_bad_oid)) {
+ if (oideq(mb, current_bad_oid)) {
handle_bad_merge_base();
} else if (0 <= oid_array_lookup(&good_revs, mb)) {
continue;
@@ -988,7 +989,7 @@ int bisect_next_all(const char *prefix, int no_checkout)
bisect_rev = &revs.commits->item->object.oid;
- if (!oidcmp(bisect_rev, current_bad_oid)) {
+ if (oideq(bisect_rev, current_bad_oid)) {
exit_if_skipped_commits(tried, current_bad_oid);
printf("%s is the first %s commit\n", oid_to_hex(bisect_rev),
term_bad);
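
The DEBUG_BISECT change above trades an #if guard for a runtime test of a
compile-time constant: the debugging code stays visible to the compiler (so it
cannot silently bit-rot), yet is still dropped from the object code when the
constant is 0. A standalone sketch of the same idiom, with made-up names:

	#include <stdio.h>

	#define DEBUG_TRACE 0	/* flip to 1 for verbose output */

	static void trace_step(int step)
	{
		if (!DEBUG_TRACE)
			return;	/* constant condition; the optimizer removes the rest */
		fprintf(stderr, "step %d\n", step);
	}
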
diff --git a/blame.c b/blame.c
index aca06f4b12..d5f7b7237c 100644
--- a/blame.c
+++ b/blame.c
@@ -1457,14 +1457,14 @@ static void pass_blame(struct blame_scoreboard *sb, struct blame_origin *origin,
porigin = find(p, origin);
if (!porigin)
continue;
- if (!oidcmp(&porigin->blob_oid, &origin->blob_oid)) {
+ if (oideq(&porigin->blob_oid, &origin->blob_oid)) {
pass_whole_blame(sb, origin, porigin);
blame_origin_decref(porigin);
goto finish;
}
for (j = same = 0; j < i; j++)
if (sg_origin[j] &&
- !oidcmp(&sg_origin[j]->blob_oid, &porigin->blob_oid)) {
+ oideq(&sg_origin[j]->blob_oid, &porigin->blob_oid)) {
same = 1;
break;
}
@@ -1832,7 +1832,7 @@ void setup_scoreboard(struct blame_scoreboard *sb,
sb->revs->children.name = "children";
while (c->parents &&
- oidcmp(&c->object.oid, &sb->final->object.oid)) {
+ !oideq(&c->object.oid, &sb->final->object.oid)) {
struct commit_list *l = xcalloc(1, sizeof(*l));
l->item = c;
@@ -1842,7 +1842,7 @@ void setup_scoreboard(struct blame_scoreboard *sb,
c = c->parents->item;
}
- if (oidcmp(&c->object.oid, &sb->final->object.oid))
+ if (!oideq(&c->object.oid, &sb->final->object.oid))
die(_("--reverse --first-parent together require range along first-parent chain"));
}
diff --git a/builtin.h b/builtin.h
index 99206df4bd..962f0489ab 100644
--- a/builtin.h
+++ b/builtin.h
@@ -191,6 +191,7 @@ extern int cmd_merge_recursive(int argc, const char **argv, const char *prefix);
extern int cmd_merge_tree(int argc, const char **argv, const char *prefix);
extern int cmd_mktag(int argc, const char **argv, const char *prefix);
extern int cmd_mktree(int argc, const char **argv, const char *prefix);
+extern int cmd_multi_pack_index(int argc, const char **argv, const char *prefix);
extern int cmd_mv(int argc, const char **argv, const char *prefix);
extern int cmd_name_rev(int argc, const char **argv, const char *prefix);
extern int cmd_notes(int argc, const char **argv, const char *prefix);
diff --git a/builtin/add.c b/builtin/add.c
index 9916498a29..0b64bcdebe 100644
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -454,7 +454,7 @@ int cmd_add(int argc, const char **argv, const char *prefix)
* Check the "pathspec '%s' did not match any files" block
* below before enabling new magic.
*/
- parse_pathspec(&pathspec, 0,
+ parse_pathspec(&pathspec, PATHSPEC_ATTR,
PATHSPEC_PREFER_FULL |
PATHSPEC_SYMLINK_LEADING_PATH,
prefix, argv);
diff --git a/builtin/am.c b/builtin/am.c
index 5e866d17c7..5e643e2a3e 100644
--- a/builtin/am.c
+++ b/builtin/am.c
@@ -1244,6 +1244,10 @@ static int parse_mail(struct am_state *state, const char *mail)
fclose(mi.input);
fclose(mi.output);
+ if (mi.format_flowed)
+ warning(_("Patch sent with format=flowed; "
+ "space at the end of lines might be lost."));
+
/* Extract message and author information */
fp = xfopen(am_path(state, "info"), "r");
while (!strbuf_getline_lf(&sb, fp)) {
@@ -2078,7 +2082,7 @@ static int safe_to_abort(const struct am_state *state)
if (get_oid("HEAD", &head))
oidclr(&head);
- if (!oidcmp(&head, &abort_safety))
+ if (oideq(&head, &abort_safety))
return 1;
warning(_("You seem to have moved HEAD since the last 'am' failure.\n"
diff --git a/builtin/branch.c b/builtin/branch.c
index bbd006aab4..c396c41533 100644
--- a/builtin/branch.c
+++ b/builtin/branch.c
@@ -23,6 +23,7 @@
#include "ref-filter.h"
#include "worktree.h"
#include "help.h"
+#include "commit-reach.h"
static const char * const builtin_branch_usage[] = {
N_("git branch [<options>] [-r | -a] [--merged | --no-merged]"),
@@ -37,7 +38,6 @@ static const char * const builtin_branch_usage[] = {
static const char *head;
static struct object_id head_oid;
-static int used_deprecated_reflog_option;
static int branch_use_color = -1;
static char branch_colors[][COLOR_MAXLEN] = {
@@ -578,14 +578,6 @@ static int edit_branch_description(const char *branch_name)
return 0;
}
-static int deprecated_reflog_option_cb(const struct option *opt,
- const char *arg, int unset)
-{
- used_deprecated_reflog_option = 1;
- *(int *)opt->value = !unset;
- return 0;
-}
-
int cmd_branch(int argc, const char **argv, const char *prefix)
{
int delete = 0, rename = 0, copy = 0, force = 0, list = 0;
@@ -627,14 +619,8 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
OPT_BIT('M', NULL, &rename, N_("move/rename a branch, even if target exists"), 2),
OPT_BIT('c', "copy", &copy, N_("copy a branch and its reflog"), 1),
OPT_BIT('C', NULL, &copy, N_("copy a branch, even if target exists"), 2),
- OPT_BOOL(0, "list", &list, N_("list branch names")),
+ OPT_BOOL('l', "list", &list, N_("list branch names")),
OPT_BOOL(0, "create-reflog", &reflog, N_("create the branch's reflog")),
- {
- OPTION_CALLBACK, 'l', NULL, &reflog, NULL,
- N_("deprecated synonym for --create-reflog"),
- PARSE_OPT_NOARG | PARSE_OPT_HIDDEN,
- deprecated_reflog_option_cb
- },
OPT_BOOL(0, "edit-description", &edit_description,
N_("edit the description for the branch")),
OPT__FORCE(&force, N_("force creation, move/rename, deletion"), PARSE_OPT_NOCOMPLETE),
@@ -707,11 +693,6 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
if (list)
setup_auto_pager("branch", 1);
- if (used_deprecated_reflog_option && !list) {
- warning("the '-l' alias for '--create-reflog' is deprecated;");
- warning("it will be removed in a future version of Git");
- }
-
if (delete) {
if (!argc)
die(_("branch name required"));
diff --git a/builtin/check-attr.c b/builtin/check-attr.c
index c05573ff9c..30a2f84274 100644
--- a/builtin/check-attr.c
+++ b/builtin/check-attr.c
@@ -65,8 +65,7 @@ static void check_attr(const char *prefix,
if (collect_all) {
git_all_attrs(&the_index, full_path, check);
} else {
- if (git_check_attr(&the_index, full_path, check))
- die("git_check_attr died");
+ git_check_attr(&the_index, full_path, check);
}
output_attr(check, file);
diff --git a/builtin/checkout.c b/builtin/checkout.c
index 29ef50013d..b30b48767e 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -25,6 +25,8 @@
#include "submodule.h"
#include "advice.h"
+static int checkout_optimize_new_branch;
+
static const char * const checkout_usage[] = {
N_("git checkout [<options>] <branch>"),
N_("git checkout [<options>] [<branch>] -- <file>..."),
@@ -42,6 +44,10 @@ struct checkout_opts {
int ignore_skipworktree;
int ignore_other_worktrees;
int show_progress;
+ /*
+ * If new checkout options are added, skip_merge_working_tree
+ * should be updated accordingly.
+ */
const char *new_branch;
const char *new_branch_force;
@@ -96,7 +102,7 @@ static int update_some(const struct object_id *oid, struct strbuf *base,
if (pos >= 0) {
struct cache_entry *old = active_cache[pos];
if (ce->ce_mode == old->ce_mode &&
- !oidcmp(&ce->oid, &old->oid)) {
+ oideq(&ce->oid, &old->oid)) {
old->ce_flags |= CE_UPDATE;
discard_cache_entry(ce);
return 0;
@@ -472,6 +478,98 @@ static void setup_branch_path(struct branch_info *branch)
branch->path = strbuf_detach(&buf, NULL);
}
+/*
+ * Skip merging the trees, updating the index and working directory if and
+ * only if we are creating a new branch via "git checkout -b <new_branch>."
+ */
+static int skip_merge_working_tree(const struct checkout_opts *opts,
+ const struct branch_info *old_branch_info,
+ const struct branch_info *new_branch_info)
+{
+ /*
+ * Do the merge if sparse checkout is on and the user has not opted in
+ * to the optimized behavior
+ */
+ if (core_apply_sparse_checkout && !checkout_optimize_new_branch)
+ return 0;
+
+ /*
+ * We must do the merge if we are actually moving to a new commit.
+ */
+ if (!old_branch_info->commit || !new_branch_info->commit ||
+ oidcmp(&old_branch_info->commit->object.oid, &new_branch_info->commit->object.oid))
+ return 0;
+
+ /*
+ * opts->patch_mode cannot be used with switching branches so is
+ * not tested here
+ */
+
+ /*
+ * opts->quiet only impacts output so doesn't require a merge
+ */
+
+ /*
+ * Honor the explicit request for a three-way merge or to throw away
+ * local changes
+ */
+ if (opts->merge || opts->force)
+ return 0;
+
+ /*
+ * --detach is documented as "updating the index and the files in the
+ * working tree" but this optimization skips those steps so fall through
+ * to the regular code path.
+ */
+ if (opts->force_detach)
+ return 0;
+
+ /*
+ * opts->writeout_stage cannot be used with switching branches so is
+ * not tested here
+ */
+
+ /*
+ * Honor the explicit ignore requests
+ */
+ if (!opts->overwrite_ignore || opts->ignore_skipworktree ||
+ opts->ignore_other_worktrees)
+ return 0;
+
+ /*
+ * opts->show_progress only impacts output so doesn't require a merge
+ */
+
+ /*
+ * If we aren't creating a new branch any changes or updates will
+ * happen in the existing branch. Since that could only be updating
+ * the index and working directory, we don't want to skip those steps
+ * or we've defeated any purpose in running the command.
+ */
+ if (!opts->new_branch)
+ return 0;
+
+ /*
+ * new_branch_force is defined to "create/reset and checkout a branch"
+ * so needs to go through the merge to do the reset
+ */
+ if (opts->new_branch_force)
+ return 0;
+
+ /*
+ * A new orphaned branch requires the index and the working tree to be
+ * adjusted to <start_point>
+ */
+ if (opts->new_orphan_branch)
+ return 0;
+
+ /*
+ * Remaining variables are not checkout options but used to track state
+ */
+
+ return 1;
+}
+
static int merge_working_tree(const struct checkout_opts *opts,
struct branch_info *old_branch_info,
struct branch_info *new_branch_info,
@@ -846,10 +944,19 @@ static int switch_branches(const struct checkout_opts *opts,
parse_commit_or_die(new_branch_info->commit);
}
- ret = merge_working_tree(opts, &old_branch_info, new_branch_info, &writeout_error);
- if (ret) {
- free(path_to_free);
- return ret;
+ /* optimize the "checkout -b <new_branch> path */
+ if (skip_merge_working_tree(opts, &old_branch_info, new_branch_info)) {
+ if (!checkout_optimize_new_branch && !opts->quiet) {
+ if (read_cache_preload(NULL) < 0)
+ return error(_("index file corrupt"));
+ show_local_changes(&new_branch_info->commit->object, &opts->diff_options);
+ }
+ } else {
+ ret = merge_working_tree(opts, &old_branch_info, new_branch_info, &writeout_error);
+ if (ret) {
+ free(path_to_free);
+ return ret;
+ }
}
if (!opts->quiet && !old_branch_info.path && old_branch_info.commit && new_branch_info->commit != old_branch_info.commit)
@@ -864,6 +971,11 @@ static int switch_branches(const struct checkout_opts *opts,
static int git_checkout_config(const char *var, const char *value, void *cb)
{
+ if (!strcmp(var, "checkout.optimizenewbranch")) {
+ checkout_optimize_new_branch = git_config_bool(var, value);
+ return 0;
+ }
+
if (!strcmp(var, "diff.ignoresubmodules")) {
struct checkout_opts *opts = cb;
handle_ignore_submodules_arg(&opts->diff_options, value);
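
skip_merge_working_tree() above is a chain of early returns: anything that
needs the index or the working tree touched falls back to the full
merge_working_tree() path, and only a plain "git checkout -b <branch>" staying
on the current commit may take the shortcut (opt-in via
checkout.optimizeNewBranch when sparse checkout is in use). A condensed,
illustrative restatement of that shape, not the actual function:

	static int can_skip_merge(const struct checkout_opts *o, int same_commit)
	{
		if (core_apply_sparse_checkout && !checkout_optimize_new_branch)
			return 0;	/* optimization is opt-in under sparse checkout */
		if (!same_commit)
			return 0;	/* really moving to another commit */
		if (o->merge || o->force || o->force_detach)
			return 0;	/* caller asked for real work on the tree */
		if (!o->overwrite_ignore || o->ignore_skipworktree ||
		    o->ignore_other_worktrees)
			return 0;
		return o->new_branch && !o->new_branch_force && !o->new_orphan_branch;
	}
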
diff --git a/builtin/clone.c b/builtin/clone.c
index fd2c3ef090..15b142d646 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -748,6 +748,7 @@ static int checkout(int submodule_progress)
memset(&opts, 0, sizeof opts);
opts.update = 1;
opts.merge = 1;
+ opts.clone = 1;
opts.fn = oneway_merge;
opts.verbose_update = (option_verbosity >= 0);
opts.src_index = &the_index;
diff --git a/builtin/commit.c b/builtin/commit.c
index 0d9828e29e..b57d8e4b82 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -33,6 +33,8 @@
#include "sequencer.h"
#include "mailmap.h"
#include "help.h"
+#include "commit-reach.h"
+#include "commit-graph.h"
static const char * const builtin_commit_usage[] = {
N_("git commit [<options>] [--] <pathspec>..."),
@@ -1651,6 +1653,9 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
"new_index file. Check that disk is not full and quota is\n"
"not exceeded, and then \"git reset HEAD\" to recover."));
+ if (git_env_bool(GIT_TEST_COMMIT_GRAPH, 0))
+ write_commit_graph_reachable(get_object_directory(), 0);
+
rerere(0);
run_command_v_opt(argv_gc_auto, RUN_GIT_CMD);
run_commit_hook(use_editor, get_index_file(), "post-commit", NULL);
diff --git a/builtin/count-objects.c b/builtin/count-objects.c
index d51e2ce1ec..a7cad052c6 100644
--- a/builtin/count-objects.c
+++ b/builtin/count-objects.c
@@ -123,7 +123,7 @@ int cmd_count_objects(int argc, const char **argv, const char *prefix)
struct strbuf pack_buf = STRBUF_INIT;
struct strbuf garbage_buf = STRBUF_INIT;
- for (p = get_packed_git(the_repository); p; p = p->next) {
+ for (p = get_all_packs(the_repository); p; p = p->next) {
if (!p->pack_local)
continue;
if (open_pack_index(p))
diff --git a/builtin/describe.c b/builtin/describe.c
index 41606c8a90..22c0541da5 100644
--- a/builtin/describe.c
+++ b/builtin/describe.c
@@ -62,7 +62,7 @@ static const char *prio_names[] = {
N_("head"), N_("lightweight"), N_("annotated"),
};
-static int commit_name_cmp(const void *unused_cmp_data,
+static int commit_name_neq(const void *unused_cmp_data,
const void *entry,
const void *entry_or_key,
const void *peeled)
@@ -70,7 +70,7 @@ static int commit_name_cmp(const void *unused_cmp_data,
const struct commit_name *cn1 = entry;
const struct commit_name *cn2 = entry_or_key;
- return oidcmp(&cn1->peeled, peeled ? peeled : &cn2->peeled);
+ return !oideq(&cn1->peeled, peeled ? peeled : &cn2->peeled);
}
static inline struct commit_name *find_commit_name(const struct object_id *peeled)
@@ -190,7 +190,7 @@ static int get_name(const char *path, const struct object_id *oid, int flag, voi
/* Is it annotated? */
if (!peel_ref(path, &peeled)) {
- is_annotated = !!oidcmp(oid, &peeled);
+ is_annotated = !oideq(oid, &peeled);
} else {
oidcpy(&peeled, oid);
is_annotated = 0;
@@ -469,7 +469,7 @@ static void process_object(struct object *obj, const char *path, void *data)
{
struct process_commit_data *pcd = data;
- if (!oidcmp(&pcd->looking_for, &obj->oid) && !pcd->dst->len) {
+ if (oideq(&pcd->looking_for, &obj->oid) && !pcd->dst->len) {
reset_revision_walk();
describe_commit(&pcd->current_commit, pcd->dst);
strbuf_addf(pcd->dst, ":%s", path);
@@ -596,7 +596,7 @@ int cmd_describe(int argc, const char **argv, const char *prefix)
return cmd_name_rev(args.argc, args.argv, prefix);
}
- hashmap_init(&names, commit_name_cmp, NULL, 0);
+ hashmap_init(&names, commit_name_neq, NULL, 0);
for_each_rawref(get_name, NULL);
if (!hashmap_get_size(&names) && !always)
die(_("No names found, cannot describe anything."));
diff --git a/builtin/diff.c b/builtin/diff.c
index 361a3c3ed3..b3a8ba488f 100644
--- a/builtin/diff.c
+++ b/builtin/diff.c
@@ -41,7 +41,7 @@ static void stuff_change(struct diff_options *opt,
struct diff_filespec *one, *two;
if (!is_null_oid(old_oid) && !is_null_oid(new_oid) &&
- !oidcmp(old_oid, new_oid) && (old_mode == new_mode))
+ oideq(old_oid, new_oid) && (old_mode == new_mode))
return;
if (opt->flags.reverse_diff) {
diff --git a/builtin/difftool.c b/builtin/difftool.c
index cdd585ca76..b41a9199ff 100644
--- a/builtin/difftool.c
+++ b/builtin/difftool.c
@@ -116,7 +116,7 @@ static int use_wt_file(const char *workdir, const char *name,
if (is_null_oid(oid)) {
oidcpy(oid, &wt_oid);
use = 1;
- } else if (!oidcmp(oid, &wt_oid))
+ } else if (oideq(oid, &wt_oid))
use = 1;
}
}
@@ -438,7 +438,7 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
strbuf_reset(&buf);
strbuf_addf(&buf, "Subproject commit %s",
oid_to_hex(&roid));
- if (!oidcmp(&loid, &roid))
+ if (oideq(&loid, &roid))
strbuf_addstr(&buf, "-dirty");
add_left_or_right(&submodules, dst_path, buf.buf, 1);
continue;
diff --git a/builtin/fast-export.c b/builtin/fast-export.c
index 9bd8a14b57..74f3bf5c96 100644
--- a/builtin/fast-export.c
+++ b/builtin/fast-export.c
@@ -384,7 +384,7 @@ static void show_filemodify(struct diff_queue_struct *q,
string_list_insert(changed, spec->path);
putchar('\n');
- if (!oidcmp(&ospec->oid, &spec->oid) &&
+ if (oideq(&ospec->oid, &spec->oid) &&
ospec->mode == spec->mode)
break;
}
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 61bec5d213..0696abfc2a 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -22,6 +22,7 @@
#include "utf8.h"
#include "packfile.h"
#include "list-objects-filter-options.h"
+#include "commit-reach.h"
static const char * const builtin_fetch_usage[] = {
N_("git fetch [<options>] [<repository> [<refspec>...]]"),
@@ -114,7 +115,7 @@ static struct option builtin_fetch_options[] = {
N_("append to .git/FETCH_HEAD instead of overwriting")),
OPT_STRING(0, "upload-pack", &upload_pack, N_("path"),
N_("path to upload pack on remote end")),
- OPT__FORCE(&force, N_("force overwrite of local branch"), 0),
+ OPT__FORCE(&force, N_("force overwrite of local reference"), 0),
OPT_BOOL('m', "multiple", &multiple,
N_("fetch from multiple remotes")),
OPT_SET_INT('t', "tags", &tags,
@@ -238,7 +239,7 @@ static int will_fetch(struct ref **head, const unsigned char *sha1)
{
struct ref *rm = *head;
while (rm) {
- if (!hashcmp(rm->old_oid.hash, sha1))
+ if (hasheq(rm->old_oid.hash, sha1))
return 1;
rm = rm->next;
}
@@ -507,7 +508,7 @@ static void adjust_refcol_width(const struct ref *ref)
int max, rlen, llen, len;
/* uptodate lines are only shown on high verbosity level */
- if (!verbosity && !oidcmp(&ref->peer_ref->old_oid, &ref->old_oid))
+ if (!verbosity && oideq(&ref->peer_ref->old_oid, &ref->old_oid))
return;
max = term_columns();
@@ -644,7 +645,7 @@ static int update_local_ref(struct ref *ref,
if (type < 0)
die(_("object %s not found"), oid_to_hex(&ref->new_oid));
- if (!oidcmp(&ref->old_oid, &ref->new_oid)) {
+ if (oideq(&ref->old_oid, &ref->new_oid)) {
if (verbosity > 0)
format_display(display, '=', _("[up to date]"), NULL,
remote, pretty_ref, summary_width);
@@ -667,12 +668,18 @@ static int update_local_ref(struct ref *ref,
if (!is_null_oid(&ref->old_oid) &&
starts_with(ref->name, "refs/tags/")) {
- int r;
- r = s_update_ref("updating tag", ref, 0);
- format_display(display, r ? '!' : 't', _("[tag update]"),
- r ? _("unable to update local ref") : NULL,
- remote, pretty_ref, summary_width);
- return r;
+ if (force || ref->force) {
+ int r;
+ r = s_update_ref("updating tag", ref, 0);
+ format_display(display, r ? '!' : 't', _("[tag update]"),
+ r ? _("unable to update local ref") : NULL,
+ remote, pretty_ref, summary_width);
+ return r;
+ } else {
+ format_display(display, '!', _("[rejected]"), _("would clobber existing tag"),
+ remote, pretty_ref, summary_width);
+ return 1;
+ }
}
current = lookup_commit_reference_gently(the_repository,
diff --git a/builtin/fmt-merge-msg.c b/builtin/fmt-merge-msg.c
index f35ff1612b..59a40342b6 100644
--- a/builtin/fmt-merge-msg.c
+++ b/builtin/fmt-merge-msg.c
@@ -12,6 +12,7 @@
#include "fmt-merge-msg.h"
#include "gpg-interface.h"
#include "repository.h"
+#include "commit-reach.h"
static const char * const fmt_merge_msg_usage[] = {
N_("git fmt-merge-msg [-m <message>] [--log[=<n>] | --no-log] [--file <file>]"),
@@ -78,9 +79,9 @@ static struct merge_parent *find_merge_parent(struct merge_parents *table,
{
int i;
for (i = 0; i < table->nr; i++) {
- if (given && oidcmp(&table->item[i].given, given))
+ if (given && !oideq(&table->item[i].given, given))
continue;
- if (commit && oidcmp(&table->item[i].commit, commit))
+ if (commit && !oideq(&table->item[i].commit, commit))
continue;
return &table->item[i];
}
@@ -582,7 +583,7 @@ static void find_merge_parents(struct merge_parents *result,
while (parents) {
struct commit *cmit = pop_commit(&parents);
for (i = 0; i < result->nr; i++)
- if (!oidcmp(&result->item[i].commit, &cmit->object.oid))
+ if (oideq(&result->item[i].commit, &cmit->object.oid))
result->item[i].used = 1;
}
diff --git a/builtin/fsck.c b/builtin/fsck.c
index 250f5af118..63c8578cc1 100644
--- a/builtin/fsck.c
+++ b/builtin/fsck.c
@@ -740,7 +740,7 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
struct progress *progress = NULL;
if (show_progress) {
- for (p = get_packed_git(the_repository); p;
+ for (p = get_all_packs(the_repository); p;
p = p->next) {
if (open_pack_index(p))
continue;
@@ -749,7 +749,7 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
progress = start_progress(_("Checking objects"), total);
}
- for (p = get_packed_git(the_repository); p;
+ for (p = get_all_packs(the_repository); p;
p = p->next) {
/* verify gives error messages itself */
if (verify_pack(p, fsck_obj_buffer,
diff --git a/builtin/gc.c b/builtin/gc.c
index 57069442b0..2b592260e9 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -183,7 +183,7 @@ static struct packed_git *find_base_packs(struct string_list *packs,
{
struct packed_git *p, *base = NULL;
- for (p = get_packed_git(the_repository); p; p = p->next) {
+ for (p = get_all_packs(the_repository); p; p = p->next) {
if (!p->pack_local)
continue;
if (limit) {
@@ -208,7 +208,7 @@ static int too_many_packs(void)
if (gc_auto_pack_limit <= 0)
return 0;
- for (cnt = 0, p = get_packed_git(the_repository); p; p = p->next) {
+ for (cnt = 0, p = get_all_packs(the_repository); p; p = p->next) {
if (!p->pack_local)
continue;
if (p->pack_keep)
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index 9582ead950..2004e25da2 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -719,9 +719,9 @@ static void find_ref_delta_children(const struct object_id *oid,
*last_index = -1;
return;
}
- while (first > 0 && !oidcmp(&ref_deltas[first - 1].oid, oid))
+ while (first > 0 && oideq(&ref_deltas[first - 1].oid, oid))
--first;
- while (last < end && !oidcmp(&ref_deltas[last + 1].oid, oid))
+ while (last < end && oideq(&ref_deltas[last + 1].oid, oid))
++last;
*first_index = first;
*last_index = last;
@@ -1166,7 +1166,7 @@ static void parse_pack_objects(unsigned char *hash)
/* Check pack integrity */
flush();
the_hash_algo->final_fn(hash, &input_ctx);
- if (hashcmp(fill(the_hash_algo->rawsz), hash))
+ if (!hasheq(fill(the_hash_algo->rawsz), hash))
die(_("pack is corrupted (SHA1 mismatch)"));
use(the_hash_algo->rawsz);
@@ -1280,7 +1280,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha
fixup_pack_header_footer(output_fd, pack_hash,
curr_pack, nr_objects,
read_hash, consumed_bytes-the_hash_algo->rawsz);
- if (hashcmp(read_hash, tail_hash) != 0)
+ if (!hasheq(read_hash, tail_hash))
die(_("Unexpected tail checksum for %s "
"(disk corruption?)"), curr_pack);
}
diff --git a/builtin/interpret-trailers.c b/builtin/interpret-trailers.c
index b742539d4d..4b87e0dd2e 100644
--- a/builtin/interpret-trailers.c
+++ b/builtin/interpret-trailers.c
@@ -104,6 +104,7 @@ int cmd_interpret_trailers(int argc, const char **argv, const char *prefix)
OPT_BOOL(0, "unfold", &opts.unfold, N_("join whitespace-continued values")),
{ OPTION_CALLBACK, 0, "parse", &opts, NULL, N_("set parsing options"),
PARSE_OPT_NOARG | PARSE_OPT_NONEG, parse_opt_parse },
+ OPT_BOOL(0, "no-divider", &opts.no_divider, N_("do not treat --- specially")),
OPT_CALLBACK(0, "trailer", &trailers, N_("trailer"),
N_("trailer(s) to add"), option_parse_trailer),
OPT_END()
diff --git a/builtin/log.c b/builtin/log.c
index e094560d9a..1dbb9d829b 100644
--- a/builtin/log.c
+++ b/builtin/log.c
@@ -31,6 +31,9 @@
#include "progress.h"
#include "commit-slab.h"
#include "repository.h"
+#include "commit-reach.h"
+#include "interdiff.h"
+#include "range-diff.h"
#define MAIL_DEFAULT_WRAP 72
@@ -992,12 +995,32 @@ static char *find_branch_name(struct rev_info *rev)
tip_oid = &rev->cmdline.rev[positive].item->oid;
if (dwim_ref(ref, strlen(ref), &branch_oid, &full_ref) &&
skip_prefix(full_ref, "refs/heads/", &v) &&
- !oidcmp(tip_oid, &branch_oid))
+ oideq(tip_oid, &branch_oid))
branch = xstrdup(v);
free(full_ref);
return branch;
}
+static void show_diffstat(struct rev_info *rev,
+ struct commit *origin, struct commit *head)
+{
+ struct diff_options opts;
+
+ memcpy(&opts, &rev->diffopt, sizeof(opts));
+ opts.output_format = DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT;
+ opts.stat_width = MAIL_DEFAULT_WRAP;
+
+ diff_setup_done(&opts);
+
+ diff_tree_oid(get_commit_tree_oid(origin),
+ get_commit_tree_oid(head),
+ "", &opts);
+ diffcore_std(&opts);
+ diff_flush(&opts);
+
+ fprintf(rev->diffopt.file, "\n");
+}
+
static void make_cover_letter(struct rev_info *rev, int use_stdout,
struct commit *origin,
int nr, struct commit **list,
@@ -1011,7 +1034,6 @@ static void make_cover_letter(struct rev_info *rev, int use_stdout,
struct strbuf sb = STRBUF_INIT;
int i;
const char *encoding = "UTF-8";
- struct diff_options opts;
int need_8bit_cte = 0;
struct pretty_print_context pp = {0};
struct commit *head = list[0];
@@ -1061,25 +1083,20 @@ static void make_cover_letter(struct rev_info *rev, int use_stdout,
shortlog_output(&log);
- /*
- * We can only do diffstat with a unique reference point
- */
- if (!origin)
- return;
-
- memcpy(&opts, &rev->diffopt, sizeof(opts));
- opts.output_format = DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT;
- opts.stat_width = MAIL_DEFAULT_WRAP;
+ /* We can only do diffstat with a unique reference point */
+ if (origin)
+ show_diffstat(rev, origin, head);
- diff_setup_done(&opts);
-
- diff_tree_oid(get_commit_tree_oid(origin),
- get_commit_tree_oid(head),
- "", &opts);
- diffcore_std(&opts);
- diff_flush(&opts);
+ if (rev->idiff_oid1) {
+ fprintf_ln(rev->diffopt.file, "%s", rev->idiff_title);
+ show_interdiff(rev, 0);
+ }
- fprintf(rev->diffopt.file, "\n");
+ if (rev->rdiff1) {
+ fprintf_ln(rev->diffopt.file, "%s", rev->rdiff_title);
+ show_range_diff(rev->rdiff1, rev->rdiff2,
+ rev->creation_factor, 1, &rev->diffopt);
+ }
}
static const char *clean_message_id(const char *msg_id)
@@ -1419,6 +1436,36 @@ static void print_bases(struct base_tree_info *bases, FILE *file)
oidclr(&bases->base_commit);
}
+static const char *diff_title(struct strbuf *sb, int reroll_count,
+ const char *generic, const char *rerolled)
+{
+ if (reroll_count <= 0)
+ strbuf_addstr(sb, generic);
+ else /* RFC may be v0, so allow -v1 to diff against v0 */
+ strbuf_addf(sb, rerolled, reroll_count - 1);
+ return sb->buf;
+}
+
+static void infer_range_diff_ranges(struct strbuf *r1,
+ struct strbuf *r2,
+ const char *prev,
+ struct commit *origin,
+ struct commit *head)
+{
+ const char *head_oid = oid_to_hex(&head->object.oid);
+
+ if (!strstr(prev, "..")) {
+ strbuf_addf(r1, "%s..%s", head_oid, prev);
+ strbuf_addf(r2, "%s..%s", prev, head_oid);
+ } else if (!origin) {
+ die(_("failed to infer range-diff ranges"));
+ } else {
+ strbuf_addstr(r1, prev);
+ strbuf_addf(r2, "%s..%s",
+ oid_to_hex(&origin->object.oid), head_oid);
+ }
+}
+
int cmd_format_patch(int argc, const char **argv, const char *prefix)
{
struct commit *commit;
@@ -1446,6 +1493,13 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
struct base_tree_info bases;
int show_progress = 0;
struct progress *progress = NULL;
+ struct oid_array idiff_prev = OID_ARRAY_INIT;
+ struct strbuf idiff_title = STRBUF_INIT;
+ const char *rdiff_prev = NULL;
+ struct strbuf rdiff1 = STRBUF_INIT;
+ struct strbuf rdiff2 = STRBUF_INIT;
+ struct strbuf rdiff_title = STRBUF_INIT;
+ int creation_factor = -1;
const struct option builtin_format_patch_options[] = {
{ OPTION_CALLBACK, 'n', "numbered", &numbered, NULL,
@@ -1519,6 +1573,13 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
OPT__QUIET(&quiet, N_("don't print the patch filenames")),
OPT_BOOL(0, "progress", &show_progress,
N_("show progress while generating patches")),
+ OPT_CALLBACK(0, "interdiff", &idiff_prev, N_("rev"),
+ N_("show changes against <rev> in cover letter or single patch"),
+ parse_opt_object_name),
+ OPT_STRING(0, "range-diff", &rdiff_prev, N_("refspec"),
+ N_("show changes against <refspec> in cover letter or single patch")),
+ OPT_INTEGER(0, "creation-factor", &creation_factor,
+ N_("percentage by which creation is weighted")),
OPT_END()
};
@@ -1703,8 +1764,8 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
/* Don't say anything if head and upstream are the same. */
if (rev.pending.nr == 2) {
struct object_array_entry *o = rev.pending.objects;
- if (oidcmp(&o[0].item->oid, &o[1].item->oid) == 0)
- return 0;
+ if (oideq(&o[0].item->oid, &o[1].item->oid))
+ goto done;
}
get_patch_ids(&rev, &ids);
}
@@ -1728,7 +1789,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
}
if (nr == 0)
/* nothing to do */
- return 0;
+ goto done;
total = nr;
if (cover_letter == -1) {
if (config_cover_letter == COVER_AUTO)
@@ -1741,6 +1802,35 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
if (numbered)
rev.total = total + start_number - 1;
+ if (idiff_prev.nr) {
+ if (!cover_letter && total != 1)
+ die(_("--interdiff requires --cover-letter or single patch"));
+ rev.idiff_oid1 = &idiff_prev.oid[idiff_prev.nr - 1];
+ rev.idiff_oid2 = get_commit_tree_oid(list[0]);
+ rev.idiff_title = diff_title(&idiff_title, reroll_count,
+ _("Interdiff:"),
+ _("Interdiff against v%d:"));
+ }
+
+ if (creation_factor < 0)
+ creation_factor = RANGE_DIFF_CREATION_FACTOR_DEFAULT;
+ else if (!rdiff_prev)
+ die(_("--creation-factor requires --range-diff"));
+
+ if (rdiff_prev) {
+ if (!cover_letter && total != 1)
+ die(_("--range-diff requires --cover-letter or single patch"));
+
+ infer_range_diff_ranges(&rdiff1, &rdiff2, rdiff_prev,
+ origin, list[0]);
+ rev.rdiff1 = rdiff1.buf;
+ rev.rdiff2 = rdiff2.buf;
+ rev.creation_factor = creation_factor;
+ rev.rdiff_title = diff_title(&rdiff_title, reroll_count,
+ _("Range-diff:"),
+ _("Range-diff against v%d:"));
+ }
+
if (!signature) {
; /* --no-signature inhibits all signatures */
} else if (signature && signature != git_version_string) {
@@ -1778,6 +1868,9 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
print_signature(rev.diffopt.file);
total++;
start_number--;
+ /* interdiff/range-diff in cover-letter; omit from patches */
+ rev.idiff_oid1 = NULL;
+ rev.rdiff1 = NULL;
}
rev.add_signoff = do_signoff;
@@ -1858,6 +1951,13 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
string_list_clear(&extra_hdr, 0);
if (ignore_if_in_upstream)
free_patch_ids(&ids);
+
+done:
+ oid_array_clear(&idiff_prev);
+ strbuf_release(&idiff_title);
+ strbuf_release(&rdiff1);
+ strbuf_release(&rdiff2);
+ strbuf_release(&rdiff_title);
return 0;
}
@@ -1949,7 +2049,7 @@ int cmd_cherry(int argc, const char **argv, const char *prefix)
/* Don't say anything if head and upstream are the same. */
if (revs.pending.nr == 2) {
struct object_array_entry *o = revs.pending.objects;
- if (oidcmp(&o[0].item->oid, &o[1].item->oid) == 0)
+ if (oideq(&o[0].item->oid, &o[1].item->oid))
return 0;
}
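
diff_title() above picks the interdiff/range-diff heading for the cover
letter: with no reroll count it emits the generic title, and with -v<n> it
names the previous round, n-1 (so an RFC sent as -v1 may diff against a
notional v0). A small illustration of the resulting string; the variables are
only for show:

	struct strbuf title = STRBUF_INIT;
	const char *t;

	/* as if "git format-patch -v3 --range-diff=..." had been given */
	t = diff_title(&title, 3, _("Range-diff:"), _("Range-diff against v%d:"));
	/* t now points at "Range-diff against v2:" */
	strbuf_release(&title);
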
diff --git a/builtin/merge-base.c b/builtin/merge-base.c
index 08d91b1f0c..1c92099070 100644
--- a/builtin/merge-base.c
+++ b/builtin/merge-base.c
@@ -7,6 +7,7 @@
#include "revision.h"
#include "parse-options.h"
#include "repository.h"
+#include "commit-reach.h"
static int show_merge_base(struct commit **rev, int rev_nr, int show_all)
{
diff --git a/builtin/merge-tree.c b/builtin/merge-tree.c
index f8023bae1e..8cea8a74f2 100644
--- a/builtin/merge-tree.c
+++ b/builtin/merge-tree.c
@@ -155,7 +155,7 @@ static int same_entry(struct name_entry *a, struct name_entry *b)
{
return a->oid &&
b->oid &&
- !oidcmp(a->oid, b->oid) &&
+ oideq(a->oid, b->oid) &&
a->mode == b->mode;
}
diff --git a/builtin/merge.c b/builtin/merge.c
index 8f4a5065c2..e331ca6d48 100644
--- a/builtin/merge.c
+++ b/builtin/merge.c
@@ -36,6 +36,7 @@
#include "packfile.h"
#include "tag.h"
#include "alias.h"
+#include "commit-reach.h"
#define DEFAULT_TWOHEAD (1<<0)
#define DEFAULT_OCTOPUS (1<<1)
@@ -1189,7 +1190,7 @@ static int merging_a_throwaway_tag(struct commit *commit)
tag_ref = xstrfmt("refs/tags/%s",
((struct tag *)merge_remote_util(commit)->obj)->tag);
if (!read_ref(tag_ref, &oid) &&
- !oidcmp(&oid, &merge_remote_util(commit)->obj->oid))
+ oideq(&oid, &merge_remote_util(commit)->obj->oid))
is_throwaway_tag = 0;
else
is_throwaway_tag = 1;
@@ -1448,7 +1449,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
goto done;
} else if (fast_forward != FF_NO && !remoteheads->next &&
!common->next &&
- !oidcmp(&common->item->object.oid, &head_commit->object.oid)) {
+ oideq(&common->item->object.oid, &head_commit->object.oid)) {
/* Again the most common case of merging one remote. */
struct strbuf msg = STRBUF_INIT;
struct commit *commit;
@@ -1521,7 +1522,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
* HEAD^^" would be missed.
*/
common_one = get_merge_bases(head_commit, j->item);
- if (oidcmp(&common_one->item->object.oid, &j->item->object.oid)) {
+ if (!oideq(&common_one->item->object.oid, &j->item->object.oid)) {
up_to_date = 0;
break;
}
diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c
new file mode 100644
index 0000000000..2633efd95d
--- /dev/null
+++ b/builtin/multi-pack-index.c
@@ -0,0 +1,47 @@
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "parse-options.h"
+#include "midx.h"
+
+static char const * const builtin_multi_pack_index_usage[] = {
+ N_("git multi-pack-index [--object-dir=<dir>] write"),
+ NULL
+};
+
+static struct opts_multi_pack_index {
+ const char *object_dir;
+} opts;
+
+int cmd_multi_pack_index(int argc, const char **argv,
+ const char *prefix)
+{
+ static struct option builtin_multi_pack_index_options[] = {
+ OPT_FILENAME(0, "object-dir", &opts.object_dir,
+ N_("object directory containing set of packfile and pack-index pairs")),
+ OPT_END(),
+ };
+
+ git_config(git_default_config, NULL);
+
+ argc = parse_options(argc, argv, prefix,
+ builtin_multi_pack_index_options,
+ builtin_multi_pack_index_usage, 0);
+
+ if (!opts.object_dir)
+ opts.object_dir = get_object_directory();
+
+ if (argc == 0)
+ usage_with_options(builtin_multi_pack_index_usage,
+ builtin_multi_pack_index_options);
+
+ if (argc > 1) {
+ die(_("too many arguments"));
+ return 1;
+ }
+
+ if (!strcmp(argv[0], "write"))
+ return write_midx_file(opts.object_dir);
+
+ die(_("unrecognized verb: %s"), argv[0]);
+}
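
The new builtin is deliberately thin: after option parsing, the only verb,
"write", calls straight into write_midx_file() with the chosen (or default)
object directory. As a minimal sketch of what another in-tree caller of the
writer could look like, assuming only what this file shows (an object
directory argument and 0 on success); the helper itself is hypothetical:

	#include "cache.h"
	#include "midx.h"

	/* hypothetical helper, not part of the patch */
	static int write_midx_for_current_repo(void)
	{
		/* same default the builtin uses when --object-dir is not given */
		return write_midx_file(get_object_directory());
	}
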
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index d1144a8f7e..e6316d294d 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -24,6 +24,7 @@
#include "streaming.h"
#include "thread-utils.h"
#include "pack-bitmap.h"
+#include "delta-islands.h"
#include "reachable.h"
#include "sha1-array.h"
#include "argv-array.h"
@@ -31,6 +32,7 @@
#include "packfile.h"
#include "object-store.h"
#include "dir.h"
+#include "midx.h"
#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
#define SIZE(obj) oe_size(&to_pack, obj)
@@ -40,6 +42,7 @@
#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj)
#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj)
#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val)
+#define SET_DELTA_EXT(obj, oid) oe_set_delta_ext(&to_pack, obj, oid)
#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val)
#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val)
#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)
@@ -59,6 +62,8 @@ static struct packing_data to_pack;
static struct pack_idx_entry **written_list;
static uint32_t nr_result, nr_written, nr_seen;
+static struct bitmap_index *bitmap_git;
+static uint32_t write_layer;
static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
@@ -79,6 +84,7 @@ static unsigned long pack_size_limit;
static int depth = 50;
static int delta_search_threads;
static int pack_to_stdout;
+static int thin;
static int num_preferred_base;
static struct progress *progress_state;
@@ -93,6 +99,8 @@ static uint16_t write_bitmap_options;
static int exclude_promisor_objects;
+static int use_delta_islands;
+
static unsigned long delta_cache_size = 0;
static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
static unsigned long cache_max_small_delta_size = 1000;
@@ -612,7 +620,7 @@ static inline void add_to_write_order(struct object_entry **wo,
unsigned int *endp,
struct object_entry *e)
{
- if (e->filled)
+ if (e->filled || oe_layer(&to_pack, e) != write_layer)
return;
wo[(*endp)++] = e;
e->filled = 1;
@@ -672,48 +680,15 @@ static void add_family_to_write_order(struct object_entry **wo,
add_descendants_to_write_order(wo, endp, root);
}
-static struct object_entry **compute_write_order(void)
+static void compute_layer_order(struct object_entry **wo, unsigned int *wo_end)
{
- unsigned int i, wo_end, last_untagged;
-
- struct object_entry **wo;
+ unsigned int i, last_untagged;
struct object_entry *objects = to_pack.objects;
for (i = 0; i < to_pack.nr_objects; i++) {
- objects[i].tagged = 0;
- objects[i].filled = 0;
- SET_DELTA_CHILD(&objects[i], NULL);
- SET_DELTA_SIBLING(&objects[i], NULL);
- }
-
- /*
- * Fully connect delta_child/delta_sibling network.
- * Make sure delta_sibling is sorted in the original
- * recency order.
- */
- for (i = to_pack.nr_objects; i > 0;) {
- struct object_entry *e = &objects[--i];
- if (!DELTA(e))
- continue;
- /* Mark me as the first child */
- e->delta_sibling_idx = DELTA(e)->delta_child_idx;
- SET_DELTA_CHILD(DELTA(e), e);
- }
-
- /*
- * Mark objects that are at the tip of tags.
- */
- for_each_tag_ref(mark_tagged, NULL);
-
- /*
- * Give the objects in the original recency order until
- * we see a tagged tip.
- */
- ALLOC_ARRAY(wo, to_pack.nr_objects);
- for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
if (objects[i].tagged)
break;
- add_to_write_order(wo, &wo_end, &objects[i]);
+ add_to_write_order(wo, wo_end, &objects[i]);
}
last_untagged = i;
@@ -722,7 +697,7 @@ static struct object_entry **compute_write_order(void)
*/
for (; i < to_pack.nr_objects; i++) {
if (objects[i].tagged)
- add_to_write_order(wo, &wo_end, &objects[i]);
+ add_to_write_order(wo, wo_end, &objects[i]);
}
/*
@@ -732,7 +707,7 @@ static struct object_entry **compute_write_order(void)
if (oe_type(&objects[i]) != OBJ_COMMIT &&
oe_type(&objects[i]) != OBJ_TAG)
continue;
- add_to_write_order(wo, &wo_end, &objects[i]);
+ add_to_write_order(wo, wo_end, &objects[i]);
}
/*
@@ -741,16 +716,60 @@ static struct object_entry **compute_write_order(void)
for (i = last_untagged; i < to_pack.nr_objects; i++) {
if (oe_type(&objects[i]) != OBJ_TREE)
continue;
- add_to_write_order(wo, &wo_end, &objects[i]);
+ add_to_write_order(wo, wo_end, &objects[i]);
}
/*
* Finally all the rest in really tight order
*/
for (i = last_untagged; i < to_pack.nr_objects; i++) {
- if (!objects[i].filled)
- add_family_to_write_order(wo, &wo_end, &objects[i]);
+ if (!objects[i].filled && oe_layer(&to_pack, &objects[i]) == write_layer)
+ add_family_to_write_order(wo, wo_end, &objects[i]);
}
+}
+
+static struct object_entry **compute_write_order(void)
+{
+ uint32_t max_layers = 1;
+ unsigned int i, wo_end;
+
+ struct object_entry **wo;
+ struct object_entry *objects = to_pack.objects;
+
+ for (i = 0; i < to_pack.nr_objects; i++) {
+ objects[i].tagged = 0;
+ objects[i].filled = 0;
+ SET_DELTA_CHILD(&objects[i], NULL);
+ SET_DELTA_SIBLING(&objects[i], NULL);
+ }
+
+ /*
+ * Fully connect delta_child/delta_sibling network.
+ * Make sure delta_sibling is sorted in the original
+ * recency order.
+ */
+ for (i = to_pack.nr_objects; i > 0;) {
+ struct object_entry *e = &objects[--i];
+ if (!DELTA(e))
+ continue;
+ /* Mark me as the first child */
+ e->delta_sibling_idx = DELTA(e)->delta_child_idx;
+ SET_DELTA_CHILD(DELTA(e), e);
+ }
+
+ /*
+ * Mark objects that are at the tip of tags.
+ */
+ for_each_tag_ref(mark_tagged, NULL);
+
+ if (use_delta_islands)
+ max_layers = compute_pack_layers(&to_pack);
+
+ ALLOC_ARRAY(wo, to_pack.nr_objects);
+ wo_end = 0;
+
+ for (; write_layer < max_layers; ++write_layer)
+ compute_layer_order(wo, &wo_end);
if (wo_end != to_pack.nr_objects)
die(_("ordered %u objects, expected %"PRIu32),
@@ -951,8 +970,7 @@ static int no_try_delta(const char *path)
if (!check)
check = attr_check_initl("delta", NULL);
- if (git_check_attr(&the_index, path, check))
- return 0;
+ git_check_attr(&the_index, path, check);
if (ATTR_FALSE(check->items[0].value))
return 1;
return 0;
@@ -1040,6 +1058,7 @@ static int want_object_in_pack(const struct object_id *oid,
{
int want;
struct list_head *pos;
+ struct multi_pack_index *m;
if (!exclude && local && has_loose_object_nonlocal(oid))
return 0;
@@ -1054,6 +1073,32 @@ static int want_object_in_pack(const struct object_id *oid,
if (want != -1)
return want;
}
+
+ for (m = get_multi_pack_index(the_repository); m; m = m->next) {
+ struct pack_entry e;
+ if (fill_midx_entry(oid, &e, m)) {
+ struct packed_git *p = e.p;
+ off_t offset;
+
+ if (p == *found_pack)
+ offset = *found_offset;
+ else
+ offset = find_pack_entry_one(oid->hash, p);
+
+ if (offset) {
+ if (!*found_pack) {
+ if (!is_pack_valid(p))
+ continue;
+ *found_offset = offset;
+ *found_pack = p;
+ }
+ want = want_found_object(exclude, p);
+ if (want != -1)
+ return want;
+ }
+ }
+ }
+
list_for_each(pos, get_packed_git_mru(the_repository)) {
struct packed_git *p = list_entry(pos, struct packed_git, mru);
off_t offset;
@@ -1202,7 +1247,7 @@ static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)
*/
for (neigh = 0; neigh < 8; neigh++) {
ent = pbase_tree_cache[my_ix];
- if (ent && !oidcmp(&ent->oid, oid)) {
+ if (ent && oideq(&ent->oid, oid)) {
ent->ref++;
return ent;
}
@@ -1384,7 +1429,7 @@ static void add_preferred_base(struct object_id *oid)
return;
for (it = pbase_tree; it; it = it->next) {
- if (!oidcmp(&it->pcache.oid, &tree_oid)) {
+ if (oideq(&it->pcache.oid, &tree_oid)) {
free(data);
return;
}
@@ -1510,11 +1555,16 @@ static void check_object(struct object_entry *entry)
break;
}
- if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) {
+ if (base_ref && (
+ (base_entry = packlist_find(&to_pack, base_ref, NULL)) ||
+ (thin &&
+ bitmap_has_sha1_in_uninteresting(bitmap_git, base_ref))) &&
+ in_same_island(&entry->idx.oid, &base_entry->idx.oid)) {
/*
* If base_ref was set above that means we wish to
- * reuse delta data, and we even found that base
- * in the list of objects we want to pack. Goodie!
+ * reuse delta data, and either we found that object in
+ * the list of objects we want to pack, or it's one we
+ * know the receiver has.
*
* Depth value does not matter - find_deltas() will
* never consider reused delta as the base object to
@@ -1523,10 +1573,16 @@ static void check_object(struct object_entry *entry)
*/
oe_set_type(entry, entry->in_pack_type);
SET_SIZE(entry, in_pack_size); /* delta size */
- SET_DELTA(entry, base_entry);
SET_DELTA_SIZE(entry, in_pack_size);
- entry->delta_sibling_idx = base_entry->delta_child_idx;
- SET_DELTA_CHILD(base_entry, entry);
+
+ if (base_entry) {
+ SET_DELTA(entry, base_entry);
+ entry->delta_sibling_idx = base_entry->delta_child_idx;
+ SET_DELTA_CHILD(base_entry, entry);
+ } else {
+ SET_DELTA_EXT(entry, base_ref);
+ }
+
unuse_pack(&w_curs);
return;
}
@@ -1826,6 +1882,11 @@ static int type_size_sort(const void *_a, const void *_b)
return -1;
if (a->preferred_base < b->preferred_base)
return 1;
+ if (use_delta_islands) {
+ int island_cmp = island_delta_cmp(&a->idx.oid, &b->idx.oid);
+ if (island_cmp)
+ return island_cmp;
+ }
if (a_size > b_size)
return -1;
if (a_size < b_size)
@@ -1986,6 +2047,9 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
if (trg_size < src_size / 32)
return 0;
+ if (!in_same_island(&trg->entry->idx.oid, &src->entry->idx.oid))
+ return 0;
+
/* Load data if not already done */
if (!trg->data) {
read_lock();
@@ -2528,6 +2592,9 @@ static void prepare_pack(int window, int depth)
uint32_t i, nr_deltas;
unsigned n;
+ if (use_delta_islands)
+ resolve_tree_islands(progress, &to_pack);
+
get_object_details();
/*
@@ -2691,6 +2758,9 @@ static void show_commit(struct commit *commit, void *data)
if (write_bitmap_index)
index_commit_for_bitmap(commit);
+
+ if (use_delta_islands)
+ propagate_island_marks(commit);
}
static void show_object(struct object *obj, const char *name, void *data)
@@ -2698,6 +2768,19 @@ static void show_object(struct object *obj, const char *name, void *data)
add_preferred_base_object(name);
add_object_entry(&obj->oid, obj->type, name, 0);
obj->flags |= OBJECT_ADDED;
+
+ if (use_delta_islands) {
+ const char *p;
+ unsigned depth = 0;
+ struct object_entry *ent;
+
+ for (p = strchr(name, '/'); p; p = strchr(p + 1, '/'))
+ depth++;
+
+ ent = packlist_find(&to_pack, obj->oid.hash, NULL);
+ if (ent && depth > oe_tree_depth(&to_pack, ent))
+ oe_set_tree_depth(&to_pack, ent, depth);
+ }
}
static void show_object__ma_allow_any(struct object *obj, const char *name, void *data)
@@ -2806,7 +2889,7 @@ static void add_objects_in_unpacked_packs(struct rev_info *revs)
memset(&in_pack, 0, sizeof(in_pack));
- for (p = get_packed_git(the_repository); p; p = p->next) {
+ for (p = get_all_packs(the_repository); p; p = p->next) {
struct object_id oid;
struct object *o;
@@ -2870,7 +2953,7 @@ static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
struct packed_git *p;
p = (last_found != (void *)1) ? last_found :
- get_packed_git(the_repository);
+ get_all_packs(the_repository);
while (p) {
if ((!p->pack_local || p->pack_keep ||
@@ -2880,7 +2963,7 @@ static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
return 1;
}
if (p == last_found)
- p = get_packed_git(the_repository);
+ p = get_all_packs(the_repository);
else
p = p->next;
if (p == last_found)
@@ -2916,7 +2999,7 @@ static void loosen_unused_packed_objects(struct rev_info *revs)
uint32_t i;
struct object_id oid;
- for (p = get_packed_git(the_repository); p; p = p->next) {
+ for (p = get_all_packs(the_repository); p; p = p->next) {
if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
@@ -2951,7 +3034,6 @@ static int pack_options_allow_reuse(void)
static int get_object_list_from_bitmap(struct rev_info *revs)
{
- struct bitmap_index *bitmap_git;
if (!(bitmap_git = prepare_bitmap_walk(revs)))
return -1;
@@ -2967,7 +3049,6 @@ static int get_object_list_from_bitmap(struct rev_info *revs)
}
traverse_bitmap_commit_list(bitmap_git, &add_object_entry_from_bitmap);
- free_bitmap_index(bitmap_git);
return 0;
}
@@ -3025,6 +3106,9 @@ static void get_object_list(int ac, const char **av)
if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
return;
+ if (use_delta_islands)
+ load_delta_islands();
+
if (prepare_revision_walk(&revs))
die(_("revision walk setup failed"));
mark_edges_uninteresting(&revs, show_edge);
@@ -3063,7 +3147,7 @@ static void add_extra_kept_packs(const struct string_list *names)
if (!names->nr)
return;
- for (p = get_packed_git(the_repository); p; p = p->next) {
+ for (p = get_all_packs(the_repository); p; p = p->next) {
const char *name = basename(p->pack_name);
int i;
@@ -3115,7 +3199,6 @@ static int option_parse_unpack_unreachable(const struct option *opt,
int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
int use_internal_rev_list = 0;
- int thin = 0;
int shallow = 0;
int all_progress_implied = 0;
struct argv_array rp = ARGV_ARRAY_INIT;
@@ -3204,6 +3287,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
option_parse_missing_action },
OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
N_("do not pack objects in promisor packfiles")),
+ OPT_BOOL(0, "delta-islands", &use_delta_islands,
+ N_("respect islands during delta compression")),
OPT_END(),
};
@@ -3330,13 +3415,16 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (pack_to_stdout || !rev_list_all)
write_bitmap_index = 0;
+ if (use_delta_islands)
+ argv_array_push(&rp, "--topo-order");
+
if (progress && all_progress_implied)
progress = 2;
add_extra_kept_packs(&keep_pack_list);
if (ignore_packed_keep_on_disk) {
struct packed_git *p;
- for (p = get_packed_git(the_repository); p; p = p->next)
+ for (p = get_all_packs(the_repository); p; p = p->next)
if (p->pack_local && p->pack_keep)
break;
if (!p) /* no keep-able packs found */
@@ -3349,7 +3437,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
* it also covers non-local objects
*/
struct packed_git *p;
- for (p = get_packed_git(the_repository); p; p = p->next) {
+ for (p = get_all_packs(the_repository); p; p = p->next) {
if (!p->pack_local) {
have_non_local_packs = 1;
break;
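
Two small hooks carry most of the delta-island behaviour added above:
try_delta() refuses to pair objects from different islands (roughly, a delta
base must be present in every fork its target is in, so no client ends up
needing a base it cannot see), and compute_write_order() now emits the pack
one layer at a time rather than in a single pass. Condensed from the hunks
above, with explanatory comments added:

	/* in try_delta(): never delta across island boundaries */
	if (!in_same_island(&trg->entry->idx.oid, &src->entry->idx.oid))
		return 0;

	/* in compute_write_order(): one ordering pass per layer */
	if (use_delta_islands)
		max_layers = compute_pack_layers(&to_pack);
	for (; write_layer < max_layers; ++write_layer)
		compute_layer_order(wo, &wo_end);
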
diff --git a/builtin/pack-redundant.c b/builtin/pack-redundant.c
index 0494dceff7..cf9a9aabd4 100644
--- a/builtin/pack-redundant.c
+++ b/builtin/pack-redundant.c
@@ -577,7 +577,7 @@ static struct pack_list * add_pack(struct packed_git *p)
static struct pack_list * add_pack_file(const char *filename)
{
- struct packed_git *p = get_packed_git(the_repository);
+ struct packed_git *p = get_all_packs(the_repository);
if (strlen(filename) < 40)
die("Bad pack filename: %s", filename);
@@ -592,7 +592,7 @@ static struct pack_list * add_pack_file(const char *filename)
static void load_all(void)
{
- struct packed_git *p = get_packed_git(the_repository);
+ struct packed_git *p = get_all_packs(the_repository);
while (p) {
add_pack(p);
diff --git a/builtin/pull.c b/builtin/pull.c
index 681c127a07..b2055d1dd6 100644
--- a/builtin/pull.c
+++ b/builtin/pull.c
@@ -22,6 +22,7 @@
#include "tempfile.h"
#include "lockfile.h"
#include "wt-status.h"
+#include "commit-reach.h"
enum rebase_type {
REBASE_INVALID = -1,
@@ -799,7 +800,7 @@ static int run_rebase(const struct object_id *curr_head,
struct argv_array args = ARGV_ARRAY_INIT;
if (!get_octopus_merge_base(&oct_merge_base, curr_head, merge_head, fork_point))
- if (!is_null_oid(fork_point) && !oidcmp(&oct_merge_base, fork_point))
+ if (!is_null_oid(fork_point) && oideq(&oct_merge_base, fork_point))
fork_point = NULL;
argv_array_push(&args, "rebase");
@@ -902,7 +903,7 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
oidclr(&curr_head);
if (!is_null_oid(&orig_head) && !is_null_oid(&curr_head) &&
- oidcmp(&orig_head, &curr_head)) {
+ !oideq(&orig_head, &curr_head)) {
/*
* The fetch involved updating the current branch.
*
diff --git a/builtin/range-diff.c b/builtin/range-diff.c
index 0aa9bed41f..96af537493 100644
--- a/builtin/range-diff.c
+++ b/builtin/range-diff.c
@@ -11,14 +11,9 @@ N_("git range-diff [<options>] <base> <old-tip> <new-tip>"),
NULL
};
-static struct strbuf *output_prefix_cb(struct diff_options *opt, void *data)
-{
- return data;
-}
-
int cmd_range_diff(int argc, const char **argv, const char *prefix)
{
- int creation_factor = 60;
+ int creation_factor = RANGE_DIFF_CREATION_FACTOR_DEFAULT;
struct diff_options diffopt = { NULL };
int simple_color = -1;
struct option options[] = {
@@ -29,17 +24,11 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix)
OPT_END()
};
int i, j, res = 0;
- struct strbuf four_spaces = STRBUF_INIT;
struct strbuf range1 = STRBUF_INIT, range2 = STRBUF_INIT;
git_config(git_diff_ui_config, NULL);
diff_setup(&diffopt);
- diffopt.output_format = DIFF_FORMAT_PATCH;
- diffopt.flags.suppress_diff_headers = 1;
- diffopt.output_prefix = output_prefix_cb;
- strbuf_addstr(&four_spaces, " ");
- diffopt.output_prefix_data = &four_spaces;
argc = parse_options(argc, argv, NULL, options,
builtin_range_diff_usage, PARSE_OPT_KEEP_UNKNOWN |
@@ -63,12 +52,9 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix)
options + ARRAY_SIZE(options) - 1, /* OPT_END */
builtin_range_diff_usage, 0);
- if (simple_color < 1) {
- if (!simple_color)
- /* force color when --dual-color was used */
- diffopt.use_color = 1;
- diffopt.flags.dual_color_diffed_diffs = 1;
- }
+ /* force color when --dual-color was used */
+ if (!simple_color)
+ diffopt.use_color = 1;
if (argc == 2) {
if (!strstr(argv[0], ".."))
@@ -106,11 +92,10 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix)
}
res = show_range_diff(range1.buf, range2.buf, creation_factor,
- &diffopt);
+ simple_color < 1, &diffopt);
strbuf_release(&range1);
strbuf_release(&range2);
- strbuf_release(&four_spaces);
return res;
}
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index c17ce94e12..4d30001950 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -27,6 +27,7 @@
#include "packfile.h"
#include "object-store.h"
#include "protocol.h"
+#include "commit-reach.h"
static const char * const receive_pack_usage[] = {
N_("git receive-pack <git-dir>"),
@@ -465,7 +466,7 @@ static char *prepare_push_cert_nonce(const char *path, timestamp_t stamp)
unsigned char sha1[GIT_SHA1_RAWSZ];
strbuf_addf(&buf, "%s:%"PRItime, path, stamp);
- hmac_sha1(sha1, buf.buf, buf.len, cert_nonce_seed, strlen(cert_nonce_seed));;
+ hmac_sha1(sha1, buf.buf, buf.len, cert_nonce_seed, strlen(cert_nonce_seed));
strbuf_release(&buf);
/* RFC 2104 5. HMAC-SHA1-80 */
@@ -1222,8 +1223,8 @@ static void check_aliased_update(struct command *cmd, struct string_list *list)
dst_cmd = (struct command *) item->util;
- if (!oidcmp(&cmd->old_oid, &dst_cmd->old_oid) &&
- !oidcmp(&cmd->new_oid, &dst_cmd->new_oid))
+ if (oideq(&cmd->old_oid, &dst_cmd->old_oid) &&
+ oideq(&cmd->new_oid, &dst_cmd->new_oid))
return;
dst_cmd->skip_update = 1;
diff --git a/builtin/remote.c b/builtin/remote.c
index 7876db1c20..40c6f8a1bd 100644
--- a/builtin/remote.c
+++ b/builtin/remote.c
@@ -10,6 +10,7 @@
#include "refspec.h"
#include "object-store.h"
#include "argv-array.h"
+#include "commit-reach.h"
static const char * const builtin_remote_usage[] = {
N_("git remote [-v | --verbose]"),
@@ -412,7 +413,7 @@ static int get_push_ref_states(const struct ref *remote_refs,
if (is_null_oid(&ref->new_oid)) {
info->status = PUSH_STATUS_DELETE;
- } else if (!oidcmp(&ref->old_oid, &ref->new_oid))
+ } else if (oideq(&ref->old_oid, &ref->new_oid))
info->status = PUSH_STATUS_UPTODATE;
else if (is_null_oid(&ref->old_oid))
info->status = PUSH_STATUS_CREATE;
diff --git a/builtin/repack.c b/builtin/repack.c
index d5886039cc..c6a7943d5c 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -8,12 +8,14 @@
#include "strbuf.h"
#include "string-list.h"
#include "argv-array.h"
+#include "midx.h"
#include "packfile.h"
#include "object-store.h"
static int delta_base_offset = 1;
static int pack_kept_objects = -1;
static int write_bitmaps;
+static int use_delta_islands;
static char *packdir, *packtmp;
static const char *const git_repack_usage[] = {
@@ -42,6 +44,10 @@ static int repack_config(const char *var, const char *value, void *cb)
write_bitmaps = git_config_bool(var, value);
return 0;
}
+ if (!strcmp(var, "repack.usedeltaislands")) {
+ use_delta_islands = git_config_bool(var, value);
+ return 0;
+ }
return git_default_config(var, value, cb);
}
@@ -280,6 +286,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
int keep_unreachable = 0;
struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
int no_update_server_info = 0;
+ int midx_cleared = 0;
struct pack_objects_args po_args = {NULL};
struct option builtin_repack_options[] = {
@@ -301,6 +308,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
N_("pass --local to git-pack-objects")),
OPT_BOOL('b', "write-bitmap-index", &write_bitmaps,
N_("write bitmap index")),
+ OPT_BOOL('i', "delta-islands", &use_delta_islands,
+ N_("pass --delta-islands to git-pack-objects")),
OPT_STRING(0, "unpack-unreachable", &unpack_unreachable, N_("approxidate"),
N_("with -A, do not loosen objects older than this")),
OPT_BOOL('k', "keep-unreachable", &keep_unreachable,
@@ -361,6 +370,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
argv_array_push(&cmd.args, "--exclude-promisor-objects");
if (write_bitmaps)
argv_array_push(&cmd.args, "--write-bitmap-index");
+ if (use_delta_islands)
+ argv_array_push(&cmd.args, "--delta-islands");
if (pack_everything & ALL_INTO_ONE) {
get_non_kept_pack_filenames(&existing_packs, &keep_pack_list);
@@ -418,6 +429,13 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
for_each_string_list_item(item, &names) {
for (ext = 0; ext < ARRAY_SIZE(exts); ext++) {
char *fname, *fname_old;
+
+ if (!midx_cleared) {
+ /* if we move a packfile, it will invalidate the midx */
+ clear_midx_file(get_object_directory());
+ midx_cleared = 1;
+ }
+
fname = mkpathdup("%s/pack-%s%s", packdir,
item->string, exts[ext].name);
if (!file_exists(fname)) {
diff --git a/builtin/replace.c b/builtin/replace.c
index 4f05791f3e..8e67e09819 100644
--- a/builtin/replace.c
+++ b/builtin/replace.c
@@ -343,7 +343,7 @@ static int edit_and_replace(const char *object_ref, int force, int raw)
}
free(tmpfile);
- if (!oidcmp(&old_oid, &new_oid))
+ if (oideq(&old_oid, &new_oid))
return error(_("new object is the same as the old one: '%s'"), oid_to_hex(&old_oid));
return replace_object_oid(object_ref, &old_oid, "replacement", &new_oid, force);
@@ -414,7 +414,7 @@ static int check_one_mergetag(struct commit *commit,
if (get_oid(mergetag_data->argv[i], &oid) < 0)
return error(_("not a valid object name: '%s'"),
mergetag_data->argv[i]);
- if (!oidcmp(&tag->tagged->oid, &oid))
+ if (oideq(&tag->tagged->oid, &oid))
return 0; /* found */
}
@@ -474,7 +474,7 @@ static int create_graft(int argc, const char **argv, int force, int gentle)
strbuf_release(&buf);
- if (!oidcmp(&old_oid, &new_oid)) {
+ if (oideq(&old_oid, &new_oid)) {
if (gentle) {
warning(_("graft for '%s' unnecessary"), oid_to_hex(&old_oid));
return 0;
diff --git a/builtin/rerere.c b/builtin/rerere.c
index 0bc40298c2..5ed941b91f 100644
--- a/builtin/rerere.c
+++ b/builtin/rerere.c
@@ -75,7 +75,7 @@ int cmd_rerere(int argc, const char **argv, const char *prefix)
if (!strcmp(argv[0], "forget")) {
struct pathspec pathspec;
if (argc < 2)
- warning("'git rerere forget' without paths is deprecated");
+ warning(_("'git rerere forget' without paths is deprecated"));
parse_pathspec(&pathspec, 0, PATHSPEC_PREFER_CWD,
prefix, argv + 1);
return rerere_forget(&pathspec);
@@ -107,7 +107,7 @@ int cmd_rerere(int argc, const char **argv, const char *prefix)
const char *path = merge_rr.items[i].string;
const struct rerere_id *id = merge_rr.items[i].util;
if (diff_two(rerere_path(id, "preimage"), path, path, path))
- die("unable to generate diff for %s", rerere_path(id, NULL));
+ die(_("unable to generate diff for '%s'"), rerere_path(id, NULL));
}
} else
usage_with_options(rerere_usage, options);
diff --git a/builtin/rev-list.c b/builtin/rev-list.c
index 5b07f3f4a2..ed0ea7dc5b 100644
--- a/builtin/rev-list.c
+++ b/builtin/rev-list.c
@@ -493,7 +493,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
if ((!revs.commits && reflog_walk_empty(revs.reflog_info) &&
(!(revs.tag_objects || revs.tree_objects || revs.blob_objects) &&
!revs.pending.nr) &&
- !revs.rev_input_given) ||
+ !revs.rev_input_given && !revs.read_from_stdin) ||
revs.diff)
usage(rev_list_usage);
diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c
index 0f09bbbf65..455f62246d 100644
--- a/builtin/rev-parse.c
+++ b/builtin/rev-parse.c
@@ -14,6 +14,7 @@
#include "revision.h"
#include "split-index.h"
#include "submodule.h"
+#include "commit-reach.h"
#define DO_REVS 1
#define DO_NOREV 2
diff --git a/builtin/rm.c b/builtin/rm.c
index 2cbe89e0ae..17086d3d97 100644
--- a/builtin/rm.c
+++ b/builtin/rm.c
@@ -180,7 +180,7 @@ static int check_local_mod(struct object_id *head, int index_only)
if (no_head
|| get_tree_entry(head, name, &oid, &mode)
|| ce->ce_mode != create_ce_mode(mode)
- || oidcmp(&ce->oid, &oid))
+ || !oideq(&ce->oid, &oid))
staged_changes = 1;
/*
diff --git a/builtin/show-branch.c b/builtin/show-branch.c
index 363cf8509a..65f4a4c83c 100644
--- a/builtin/show-branch.c
+++ b/builtin/show-branch.c
@@ -412,7 +412,7 @@ static int append_head_ref(const char *refname, const struct object_id *oid,
/* If both heads/foo and tags/foo exists, get_sha1 would
* get confused.
*/
- if (get_oid(refname + ofs, &tmp) || oidcmp(&tmp, oid))
+ if (get_oid(refname + ofs, &tmp) || !oideq(&tmp, oid))
ofs = 5;
return append_ref(refname + ofs, oid, 0);
}
@@ -427,7 +427,7 @@ static int append_remote_ref(const char *refname, const struct object_id *oid,
/* If both heads/foo and tags/foo exists, get_sha1 would
* get confused.
*/
- if (get_oid(refname + ofs, &tmp) || oidcmp(&tmp, oid))
+ if (get_oid(refname + ofs, &tmp) || !oideq(&tmp, oid))
ofs = 5;
return append_ref(refname + ofs, oid, 0);
}
@@ -485,7 +485,7 @@ static void snarf_refs(int head, int remotes)
static int rev_is_head(const char *head, const char *name,
unsigned char *head_sha1, unsigned char *sha1)
{
- if (!head || (head_sha1 && sha1 && hashcmp(head_sha1, sha1)))
+ if (!head || (head_sha1 && sha1 && !hasheq(head_sha1, sha1)))
return 0;
skip_prefix(head, "refs/heads/", &head);
if (!skip_prefix(name, "refs/heads/", &name))
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
index f6fb8991f3..40844870cf 100644
--- a/builtin/submodule--helper.c
+++ b/builtin/submodule--helper.c
@@ -1443,6 +1443,72 @@ static int module_clone(int argc, const char **argv, const char *prefix)
return 0;
}
+static void determine_submodule_update_strategy(struct repository *r,
+ int just_cloned,
+ const char *path,
+ const char *update,
+ struct submodule_update_strategy *out)
+{
+ const struct submodule *sub = submodule_from_path(r, &null_oid, path);
+ char *key;
+ const char *val;
+
+ key = xstrfmt("submodule.%s.update", sub->name);
+
+ if (update) {
+ trace_printf("parsing update");
+ if (parse_submodule_update_strategy(update, out) < 0)
+ die(_("Invalid update mode '%s' for submodule path '%s'"),
+ update, path);
+ } else if (!repo_config_get_string_const(r, key, &val)) {
+ if (parse_submodule_update_strategy(val, out) < 0)
+ die(_("Invalid update mode '%s' configured for submodule path '%s'"),
+ val, path);
+ } else if (sub->update_strategy.type != SM_UPDATE_UNSPECIFIED) {
+ trace_printf("loaded thing");
+ out->type = sub->update_strategy.type;
+ out->command = sub->update_strategy.command;
+ } else
+ out->type = SM_UPDATE_CHECKOUT;
+
+ if (just_cloned &&
+ (out->type == SM_UPDATE_MERGE ||
+ out->type == SM_UPDATE_REBASE ||
+ out->type == SM_UPDATE_NONE))
+ out->type = SM_UPDATE_CHECKOUT;
+
+ free(key);
+}
+
+static int module_update_module_mode(int argc, const char **argv, const char *prefix)
+{
+ const char *path, *update = NULL;
+ int just_cloned;
+ struct submodule_update_strategy update_strategy = { .type = SM_UPDATE_CHECKOUT };
+
+ if (argc < 3 || argc > 4)
+ die("submodule--helper update-module-clone expects <just-cloned> <path> [<update>]");
+
+ just_cloned = git_config_int("just_cloned", argv[1]);
+ path = argv[2];
+
+ if (argc == 4)
+ update = argv[3];
+
+ determine_submodule_update_strategy(the_repository,
+ just_cloned, path, update,
+ &update_strategy);
+ fputs(submodule_strategy_to_string(&update_strategy), stdout);
+
+ return 0;
+}
+
+struct update_clone_data {
+ const struct submodule *sub;
+ struct object_id oid;
+ unsigned just_cloned;
+};
+
struct submodule_update_clone {
/* index into 'list', the list of submodules to look into for cloning */
int current;
@@ -1462,8 +1528,9 @@ struct submodule_update_clone {
const char *recursive_prefix;
const char *prefix;
- /* Machine-readable status lines to be consumed by git-submodule.sh */
- struct string_list projectlines;
+ /* to be consumed by git-submodule.sh */
+ struct update_clone_data *update_clone;
+ int update_clone_nr; int update_clone_alloc;
/* If we want to stop as fast as possible and return an error */
unsigned quickstop : 1;
@@ -1471,11 +1538,13 @@ struct submodule_update_clone {
/* failed clones to be retried again */
const struct cache_entry **failed_clones;
int failed_clones_nr, failed_clones_alloc;
+
+ int max_jobs;
};
#define SUBMODULE_UPDATE_CLONE_INIT {0, MODULE_LIST_INIT, 0, \
SUBMODULE_UPDATE_STRATEGY_INIT, 0, 0, -1, STRING_LIST_INIT_DUP, 0, \
NULL, NULL, NULL, \
- STRING_LIST_INIT_DUP, 0, NULL, 0, 0}
+ NULL, 0, 0, 0, NULL, 0, 0, 0}
static void next_submodule_warn_missing(struct submodule_update_clone *suc,
@@ -1569,11 +1638,12 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
strbuf_addf(&sb, "%s/.git", ce->name);
needs_cloning = !file_exists(sb.buf);
- strbuf_reset(&sb);
- strbuf_addf(&sb, "%06o %s %d %d\t%s\n", ce->ce_mode,
- oid_to_hex(&ce->oid), ce_stage(ce),
- needs_cloning, ce->name);
- string_list_append(&suc->projectlines, sb.buf);
+ ALLOC_GROW(suc->update_clone, suc->update_clone_nr + 1,
+ suc->update_clone_alloc);
+ oidcpy(&suc->update_clone[suc->update_clone_nr].oid, &ce->oid);
+ suc->update_clone[suc->update_clone_nr].just_cloned = needs_cloning;
+ suc->update_clone[suc->update_clone_nr].sub = sub;
+ suc->update_clone_nr++;
if (!needs_cloning)
goto cleanup;
@@ -1714,11 +1784,44 @@ static int git_update_clone_config(const char *var, const char *value,
return 0;
}
+static void update_submodule(struct update_clone_data *ucd)
+{
+ fprintf(stdout, "dummy %s %d\t%s\n",
+ oid_to_hex(&ucd->oid),
+ ucd->just_cloned,
+ ucd->sub->path);
+}
+
+static int update_submodules(struct submodule_update_clone *suc)
+{
+ int i;
+
+ run_processes_parallel(suc->max_jobs,
+ update_clone_get_next_task,
+ update_clone_start_failure,
+ update_clone_task_finished,
+ suc);
+
+ /*
+ * We saved the output and put it out all at once now.
+ * That means:
+ * - the listener does not have to interleave their (checkout)
+ * work with our fetching. The writes involved in a
+ * checkout involve more straightforward sequential I/O.
+ * - the listener can avoid doing any work if fetching failed.
+ */
+ if (suc->quickstop)
+ return 1;
+
+ for (i = 0; i < suc->update_clone_nr; i++)
+ update_submodule(&suc->update_clone[i]);
+
+ return 0;
+}
+
static int update_clone(int argc, const char **argv, const char *prefix)
{
const char *update = NULL;
- int max_jobs = 1;
- struct string_list_item *item;
struct pathspec pathspec;
struct submodule_update_clone suc = SUBMODULE_UPDATE_CLONE_INIT;
@@ -1740,7 +1843,7 @@ static int update_clone(int argc, const char **argv, const char *prefix)
OPT_STRING(0, "depth", &suc.depth, "<depth>",
N_("Create a shallow clone truncated to the "
"specified number of revisions")),
- OPT_INTEGER('j', "jobs", &max_jobs,
+ OPT_INTEGER('j', "jobs", &suc.max_jobs,
N_("parallel jobs")),
OPT_BOOL(0, "recommend-shallow", &suc.recommend_shallow,
N_("whether the initial clone should follow the shallow recommendation")),
@@ -1756,8 +1859,8 @@ static int update_clone(int argc, const char **argv, const char *prefix)
};
suc.prefix = prefix;
- update_clone_config_from_gitmodules(&max_jobs);
- git_config(git_update_clone_config, &max_jobs);
+ update_clone_config_from_gitmodules(&suc.max_jobs);
+ git_config(git_update_clone_config, &suc.max_jobs);
argc = parse_options(argc, argv, prefix, module_update_clone_options,
git_submodule_helper_usage, 0);
@@ -1772,27 +1875,7 @@ static int update_clone(int argc, const char **argv, const char *prefix)
if (pathspec.nr)
suc.warn_if_uninitialized = 1;
- run_processes_parallel(max_jobs,
- update_clone_get_next_task,
- update_clone_start_failure,
- update_clone_task_finished,
- &suc);
-
- /*
- * We saved the output and put it out all at once now.
- * That means:
- * - the listener does not have to interleave their (checkout)
- * work with our fetching. The writes involved in a
- * checkout involve more straightforward sequential I/O.
- * - the listener can avoid doing any work if fetching failed.
- */
- if (suc.quickstop)
- return 1;
-
- for_each_string_list_item(item, &suc.projectlines)
- fprintf(stdout, "%s", item->string);
-
- return 0;
+ return update_submodules(&suc);
}
static int resolve_relative_path(int argc, const char **argv, const char *prefix)
@@ -1938,6 +2021,45 @@ static int push_check(int argc, const char **argv, const char *prefix)
return 0;
}
+static int ensure_core_worktree(int argc, const char **argv, const char *prefix)
+{
+ const struct submodule *sub;
+ const char *path;
+ char *cw;
+ struct repository subrepo;
+
+ if (argc != 2)
+ BUG("submodule--helper connect-gitdir-workingtree <name> <path>");
+
+ path = argv[1];
+
+ sub = submodule_from_path(the_repository, &null_oid, path);
+ if (!sub)
+ BUG("We could get the submodule handle before?");
+
+ if (repo_submodule_init(&subrepo, the_repository, path))
+ die(_("could not get a repository handle for submodule '%s'"), path);
+
+ if (!repo_config_get_string(&subrepo, "core.worktree", &cw)) {
+ char *cfg_file, *abs_path;
+ const char *rel_path;
+ struct strbuf sb = STRBUF_INIT;
+
+ cfg_file = repo_git_path(&subrepo, "config");
+
+ abs_path = absolute_pathdup(path);
+ rel_path = relative_path(abs_path, subrepo.gitdir, &sb);
+
+ git_config_set_in_file(cfg_file, "core.worktree", rel_path);
+
+ free(cfg_file);
+ free(abs_path);
+ strbuf_release(&sb);
+ }
+
+ return 0;
+}
+
static int absorb_git_dirs(int argc, const char **argv, const char *prefix)
{
int i;
@@ -2015,7 +2137,9 @@ static struct cmd_struct commands[] = {
{"list", module_list, 0},
{"name", module_name, 0},
{"clone", module_clone, 0},
+ {"update-module-mode", module_update_module_mode, 0},
{"update-clone", update_clone, 0},
+ {"ensure-core-worktree", ensure_core_worktree, 0},
{"relative-path", resolve_relative_path, 0},
{"resolve-relative-url", resolve_relative_url, 0},
{"resolve-relative-url-test", resolve_relative_url_test, 0},
diff --git a/builtin/tag.c b/builtin/tag.c
index 9a19ffb49f..f623632186 100644
--- a/builtin/tag.c
+++ b/builtin/tag.c
@@ -559,7 +559,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix)
ref_transaction_commit(transaction, &err))
die("%s", err.buf);
ref_transaction_free(transaction);
- if (force && !is_null_oid(&prev) && oidcmp(&prev, &object))
+ if (force && !is_null_oid(&prev) && !oideq(&prev, &object))
printf(_("Updated tag '%s' (was %s)\n"), tag,
find_unique_abbrev(&prev, DEFAULT_ABBREV));
diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c
index 30d9413b4b..80478808b3 100644
--- a/builtin/unpack-objects.c
+++ b/builtin/unpack-objects.c
@@ -303,7 +303,7 @@ static void added_object(unsigned nr, enum object_type type,
struct delta_info *info;
while ((info = *p) != NULL) {
- if (!oidcmp(&info->base_oid, &obj_list[nr].oid) ||
+ if (oideq(&info->base_oid, &obj_list[nr].oid) ||
info->base_offset == obj_list[nr].offset) {
*p = info->next;
p = &delta_list;
@@ -579,7 +579,7 @@ int cmd_unpack_objects(int argc, const char **argv, const char *prefix)
if (fsck_finish(&fsck_options))
die(_("fsck error in pack objects"));
}
- if (hashcmp(fill(the_hash_algo->rawsz), oid.hash))
+ if (!hasheq(fill(the_hash_algo->rawsz), oid.hash))
die("final sha1 did not match");
use(the_hash_algo->rawsz);
diff --git a/builtin/update-index.c b/builtin/update-index.c
index fe84003b4f..e7fab78b3b 100644
--- a/builtin/update-index.c
+++ b/builtin/update-index.c
@@ -669,7 +669,7 @@ static int unresolve_one(const char *path)
ret = -1;
goto free_return;
}
- if (!oidcmp(&ce_2->oid, &ce_3->oid) &&
+ if (oideq(&ce_2->oid, &ce_3->oid) &&
ce_2->ce_mode == ce_3->ce_mode) {
fprintf(stderr, "%s: identical in both, skipping.\n",
path);
@@ -754,7 +754,7 @@ static int do_reupdate(int ac, const char **av,
old = read_one_ent(NULL, &head_oid,
ce->name, ce_namelen(ce), 0);
if (old && ce->ce_mode == old->ce_mode &&
- !oidcmp(&ce->oid, &old->oid)) {
+ oideq(&ce->oid, &old->oid)) {
discard_cache_entry(old);
continue; /* unchanged */
}
diff --git a/builtin/worktree.c b/builtin/worktree.c
index 41e7714396..c4abbde2b8 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -47,6 +47,26 @@ static int git_worktree_config(const char *var, const char *value, void *cb)
return git_default_config(var, value, cb);
}
+static int delete_git_dir(const char *id)
+{
+ struct strbuf sb = STRBUF_INIT;
+ int ret;
+
+ strbuf_addstr(&sb, git_common_path("worktrees/%s", id));
+ ret = remove_dir_recursively(&sb, 0);
+ if (ret < 0 && errno == ENOTDIR)
+ ret = unlink(sb.buf);
+ if (ret)
+ error_errno(_("failed to delete '%s'"), sb.buf);
+ strbuf_release(&sb);
+ return ret;
+}
+
+static void delete_worktrees_dir_if_empty(void)
+{
+ rmdir(git_path("worktrees")); /* ignore failed removal */
+}
+
static int prune_worktree(const char *id, struct strbuf *reason)
{
struct stat st;
@@ -116,10 +136,8 @@ static int prune_worktree(const char *id, struct strbuf *reason)
static void prune_worktrees(void)
{
struct strbuf reason = STRBUF_INIT;
- struct strbuf path = STRBUF_INIT;
DIR *dir = opendir(git_path("worktrees"));
struct dirent *d;
- int ret;
if (!dir)
return;
while ((d = readdir(dir)) != NULL) {
@@ -132,18 +150,12 @@ static void prune_worktrees(void)
printf("%s\n", reason.buf);
if (show_only)
continue;
- git_path_buf(&path, "worktrees/%s", d->d_name);
- ret = remove_dir_recursively(&path, 0);
- if (ret < 0 && errno == ENOTDIR)
- ret = unlink(path.buf);
- if (ret)
- error_errno(_("failed to remove '%s'"), path.buf);
+ delete_git_dir(d->d_name);
}
closedir(dir);
if (!show_only)
- rmdir(git_path("worktrees"));
+ delete_worktrees_dir_if_empty();
strbuf_release(&reason);
- strbuf_release(&path);
}
static int prune(int ac, const char **av, const char *prefix)
@@ -212,6 +224,43 @@ static const char *worktree_basename(const char *path, int *olen)
return name;
}
+static void validate_worktree_add(const char *path, const struct add_opts *opts)
+{
+ struct worktree **worktrees;
+ struct worktree *wt;
+ int locked;
+
+ if (file_exists(path) && !is_empty_dir(path))
+ die(_("'%s' already exists"), path);
+
+ worktrees = get_worktrees(0);
+ /*
+ * find_worktree()'s suffix matching may undesirably find the main
+ * rather than a linked worktree (for instance, when the basenames
+ * of the main worktree and the one being created are the same).
+ * We're only interested in linked worktrees, so skip the main
+ * worktree with +1.
+ */
+ wt = find_worktree(worktrees + 1, NULL, path);
+ if (!wt)
+ goto done;
+
+ locked = !!is_worktree_locked(wt);
+ if ((!locked && opts->force) || (locked && opts->force > 1)) {
+ if (delete_git_dir(wt->id))
+ die(_("unable to re-add worktree '%s'"), path);
+ goto done;
+ }
+
+ if (locked)
+ die(_("'%s' is a missing but locked worktree;\nuse 'add -f -f' to override, or 'unlock' and 'prune' or 'remove' to clear"), path);
+ else
+ die(_("'%s' is a missing but already registered worktree;\nuse 'add -f' to override, or 'prune' or 'remove' to clear"), path);
+
+done:
+ free_worktrees(worktrees);
+}
+
static int add_worktree(const char *path, const char *refname,
const struct add_opts *opts)
{
@@ -226,8 +275,7 @@ static int add_worktree(const char *path, const char *refname,
struct commit *commit = NULL;
int is_branch = 0;
- if (file_exists(path) && !is_empty_dir(path))
- die(_("'%s' already exists"), path);
+ validate_worktree_add(path, opts);
/* is 'refname' a branch or commit? */
if (!opts->detach && !strbuf_check_branch_ref(&symref, refname) &&
@@ -697,13 +745,17 @@ static void validate_no_submodules(const struct worktree *wt)
static int move_worktree(int ac, const char **av, const char *prefix)
{
+ int force = 0;
struct option options[] = {
+ OPT__FORCE(&force,
+ N_("force move even if worktree is dirty or locked"),
+ PARSE_OPT_NOCOMPLETE),
OPT_END()
};
struct worktree **worktrees, *wt;
struct strbuf dst = STRBUF_INIT;
struct strbuf errmsg = STRBUF_INIT;
- const char *reason;
+ const char *reason = NULL;
char *path;
ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
@@ -734,12 +786,13 @@ static int move_worktree(int ac, const char **av, const char *prefix)
validate_no_submodules(wt);
- reason = is_worktree_locked(wt);
+ if (force < 2)
+ reason = is_worktree_locked(wt);
if (reason) {
if (*reason)
- die(_("cannot move a locked working tree, lock reason: %s"),
+ die(_("cannot move a locked working tree, lock reason: %s\nuse 'move -f -f' to override or unlock first"),
reason);
- die(_("cannot move a locked working tree"));
+ die(_("cannot move a locked working tree;\nuse 'move -f -f' to override or unlock first"));
}
if (validate_worktree(wt, &errmsg, 0))
die(_("validation failed, cannot move working tree: %s"),
@@ -822,32 +875,18 @@ static int delete_git_work_tree(struct worktree *wt)
return ret;
}
-static int delete_git_dir(struct worktree *wt)
-{
- struct strbuf sb = STRBUF_INIT;
- int ret = 0;
-
- strbuf_addstr(&sb, git_common_path("worktrees/%s", wt->id));
- if (remove_dir_recursively(&sb, 0)) {
- error_errno(_("failed to delete '%s'"), sb.buf);
- ret = -1;
- }
- strbuf_release(&sb);
- return ret;
-}
-
static int remove_worktree(int ac, const char **av, const char *prefix)
{
int force = 0;
struct option options[] = {
OPT__FORCE(&force,
- N_("force removing even if the worktree is dirty"),
+ N_("force removal even if worktree is dirty or locked"),
PARSE_OPT_NOCOMPLETE),
OPT_END()
};
struct worktree **worktrees, *wt;
struct strbuf errmsg = STRBUF_INIT;
- const char *reason;
+ const char *reason = NULL;
int ret = 0;
ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
@@ -860,12 +899,13 @@ static int remove_worktree(int ac, const char **av, const char *prefix)
die(_("'%s' is not a working tree"), av[0]);
if (is_main_worktree(wt))
die(_("'%s' is a main working tree"), av[0]);
- reason = is_worktree_locked(wt);
+ if (force < 2)
+ reason = is_worktree_locked(wt);
if (reason) {
if (*reason)
- die(_("cannot remove a locked working tree, lock reason: %s"),
+ die(_("cannot remove a locked working tree, lock reason: %s\nuse 'remove -f -f' to override or unlock first"),
reason);
- die(_("cannot remove a locked working tree"));
+ die(_("cannot remove a locked working tree;\nuse 'remove -f -f' to override or unlock first"));
}
if (validate_worktree(wt, &errmsg, WT_VALIDATE_WORKTREE_MISSING_OK))
die(_("validation failed, cannot remove working tree: %s"),
@@ -882,7 +922,8 @@ static int remove_worktree(int ac, const char **av, const char *prefix)
* continue on even if ret is non-zero, there's no going back
* from here.
*/
- ret |= delete_git_dir(wt);
+ ret |= delete_git_dir(wt->id);
+ delete_worktrees_dir_if_empty();
free_worktrees(worktrees);
return ret;
diff --git a/bulk-checkin.c b/bulk-checkin.c
index 9f3b644811..409ecb566b 100644
--- a/bulk-checkin.c
+++ b/bulk-checkin.c
@@ -72,7 +72,7 @@ static int already_written(struct bulk_checkin_state *state, struct object_id *o
/* Might want to keep the list sorted */
for (i = 0; i < state->nr_written; i++)
- if (!oidcmp(&state->written[i]->oid, oid))
+ if (oideq(&state->written[i]->oid, oid))
return 1;
/* This is a new object we need to keep */
diff --git a/bundle.c b/bundle.c
index 24cbe40986..14f2cfc248 100644
--- a/bundle.c
+++ b/bundle.c
@@ -369,7 +369,7 @@ static int write_bundle_refs(int bundle_fd, struct rev_info *revs)
* commit that is referenced by the tag, and not the tag
* itself.
*/
- if (oidcmp(&oid, &e->item->oid)) {
+ if (!oideq(&oid, &e->item->oid)) {
/*
* Is this the positive end of a range expressed
* in terms of a tag (e.g. v2.0 from the range
diff --git a/cache-tree.c b/cache-tree.c
index 16ea022c46..5ce51468f0 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -4,6 +4,7 @@
#include "tree-walk.h"
#include "cache-tree.h"
#include "object-store.h"
+#include "replace-object.h"
#ifndef DEBUG
#define DEBUG 0
@@ -433,7 +434,9 @@ int cache_tree_update(struct index_state *istate, int flags)
if (i)
return i;
+ trace_performance_enter();
i = update_one(it, cache, entries, "", 0, &skip, flags);
+ trace_performance_leave("cache_tree_update");
if (i < 0)
return i;
istate->cache_changed |= CACHE_TREE_CHANGED;
@@ -714,7 +717,84 @@ int cache_tree_matches_traversal(struct cache_tree *root,
it = find_cache_tree_from_traversal(root, info);
it = cache_tree_find(it, ent->path);
- if (it && it->entry_count > 0 && !oidcmp(ent->oid, &it->oid))
+ if (it && it->entry_count > 0 && oideq(ent->oid, &it->oid))
return it->entry_count;
return 0;
}
+
+static void verify_one(struct index_state *istate,
+ struct cache_tree *it,
+ struct strbuf *path)
+{
+ int i, pos, len = path->len;
+ struct strbuf tree_buf = STRBUF_INIT;
+ struct object_id new_oid;
+
+ for (i = 0; i < it->subtree_nr; i++) {
+ strbuf_addf(path, "%s/", it->down[i]->name);
+ verify_one(istate, it->down[i]->cache_tree, path);
+ strbuf_setlen(path, len);
+ }
+
+ if (it->entry_count < 0 ||
+ /* no verification on tests (t7003) that replace trees */
+ lookup_replace_object(the_repository, &it->oid) != &it->oid)
+ return;
+
+ if (path->len) {
+ pos = index_name_pos(istate, path->buf, path->len);
+ pos = -pos - 1;
+ } else {
+ pos = 0;
+ }
+
+ i = 0;
+ while (i < it->entry_count) {
+ struct cache_entry *ce = istate->cache[pos + i];
+ const char *slash;
+ struct cache_tree_sub *sub = NULL;
+ const struct object_id *oid;
+ const char *name;
+ unsigned mode;
+ int entlen;
+
+ if (ce->ce_flags & (CE_STAGEMASK | CE_INTENT_TO_ADD | CE_REMOVE))
+ BUG("%s with flags 0x%x should not be in cache-tree",
+ ce->name, ce->ce_flags);
+ name = ce->name + path->len;
+ slash = strchr(name, '/');
+ if (slash) {
+ entlen = slash - name;
+ sub = find_subtree(it, ce->name + path->len, entlen, 0);
+ if (!sub || sub->cache_tree->entry_count < 0)
+ BUG("bad subtree '%.*s'", entlen, name);
+ oid = &sub->cache_tree->oid;
+ mode = S_IFDIR;
+ i += sub->cache_tree->entry_count;
+ } else {
+ oid = &ce->oid;
+ mode = ce->ce_mode;
+ entlen = ce_namelen(ce) - path->len;
+ i++;
+ }
+ strbuf_addf(&tree_buf, "%o %.*s%c", mode, entlen, name, '\0');
+ strbuf_add(&tree_buf, oid->hash, the_hash_algo->rawsz);
+ }
+ hash_object_file(tree_buf.buf, tree_buf.len, tree_type, &new_oid);
+ if (oidcmp(&new_oid, &it->oid))
+ BUG("cache-tree for path %.*s does not match. "
+ "Expected %s got %s", len, path->buf,
+ oid_to_hex(&new_oid), oid_to_hex(&it->oid));
+ strbuf_setlen(path, len);
+ strbuf_release(&tree_buf);
+}
+
+void cache_tree_verify(struct index_state *istate)
+{
+ struct strbuf path = STRBUF_INIT;
+
+ if (!istate->cache_tree)
+ return;
+ verify_one(istate, istate->cache_tree, &path);
+ strbuf_release(&path);
+}
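
The new cache_tree_verify() recomputes each subtree's object name from the index entries it claims to cover and BUG()s on any mismatch, so it is intended as a consistency check to run after the cache tree has been (re)built. A minimal sketch of such a caller, assuming the index and cache-tree APIs shown above; the wrapper name is made up for illustration:

    #include "cache.h"
    #include "cache-tree.h"

    /* Rebuild the cache tree for an in-core index, then cross-check it. */
    static void update_and_verify_cache_tree(struct index_state *istate)
    {
        if (cache_tree_update(istate, 0) < 0)
            die("failed to update cache tree");
        cache_tree_verify(istate);
    }
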
diff --git a/cache-tree.h b/cache-tree.h
index fc0c842e77..0ab6784ffe 100644
--- a/cache-tree.h
+++ b/cache-tree.h
@@ -32,6 +32,7 @@ struct cache_tree *cache_tree_read(const char *buffer, unsigned long size);
int cache_tree_fully_valid(struct cache_tree *);
int cache_tree_update(struct index_state *, int);
+void cache_tree_verify(struct index_state *);
/* bitmasks to write_cache_as_tree flags */
#define WRITE_TREE_MISSING_OK 1
diff --git a/cache.h b/cache.h
index 4d014541ab..d508f3d4f8 100644
--- a/cache.h
+++ b/cache.h
@@ -1041,14 +1041,24 @@ static inline int oidcmp(const struct object_id *oid1, const struct object_id *o
return hashcmp(oid1->hash, oid2->hash);
}
+static inline int hasheq(const unsigned char *sha1, const unsigned char *sha2)
+{
+ return !hashcmp(sha1, sha2);
+}
+
+static inline int oideq(const struct object_id *oid1, const struct object_id *oid2)
+{
+ return hasheq(oid1->hash, oid2->hash);
+}
+
static inline int is_null_sha1(const unsigned char *sha1)
{
- return !hashcmp(sha1, null_sha1);
+ return hasheq(sha1, null_sha1);
}
static inline int is_null_oid(const struct object_id *oid)
{
- return !hashcmp(oid->hash, null_sha1);
+ return hasheq(oid->hash, null_sha1);
}
static inline void hashcpy(unsigned char *sha_dst, const unsigned char *sha_src)
@@ -1085,22 +1095,22 @@ static inline void oidread(struct object_id *oid, const unsigned char *hash)
static inline int is_empty_blob_sha1(const unsigned char *sha1)
{
- return !hashcmp(sha1, the_hash_algo->empty_blob->hash);
+ return hasheq(sha1, the_hash_algo->empty_blob->hash);
}
static inline int is_empty_blob_oid(const struct object_id *oid)
{
- return !oidcmp(oid, the_hash_algo->empty_blob);
+ return oideq(oid, the_hash_algo->empty_blob);
}
static inline int is_empty_tree_sha1(const unsigned char *sha1)
{
- return !hashcmp(sha1, the_hash_algo->empty_tree->hash);
+ return hasheq(sha1, the_hash_algo->empty_tree->hash);
}
static inline int is_empty_tree_oid(const struct object_id *oid)
{
- return !oidcmp(oid, the_hash_algo->empty_tree);
+ return oideq(oid, the_hash_algo->empty_tree);
}
const char *empty_tree_oid_hex(void);
@@ -1518,6 +1528,7 @@ struct checkout {
unsigned force:1,
quiet:1,
not_new:1,
+ clone:1,
refresh_cache:1;
};
#define CHECKOUT_INIT { NULL, "" }
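
hasheq() and oideq() are thin wrappers over hashcmp()/oidcmp(), so every equality test of the form !oidcmp(a, b) can be rewritten as oideq(a, b), and oidcmp(a, b) as !oideq(a, b); that is the mechanical conversion applied throughout the rest of this series (the touched contrib/coccinelle/object_id.cocci rules appear intended to automate it). A tiny sketch of the before/after shape, assuming only the inline helpers added above; the function names are illustrative:

    #include "cache.h"

    static int same_object(const struct object_id *a, const struct object_id *b)
    {
        return oideq(a, b);    /* formerly: !oidcmp(a, b) */
    }

    static int same_raw_hash(const unsigned char *x, const unsigned char *y)
    {
        return hasheq(x, y);   /* formerly: !hashcmp(x, y) */
    }
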
diff --git a/combine-diff.c b/combine-diff.c
index de7695e728..0fed4ca529 100644
--- a/combine-diff.c
+++ b/combine-diff.c
@@ -1138,8 +1138,8 @@ static void show_patch_diff(struct combine_diff_path *elem, int num_parent,
for (i = 0; i < num_parent; i++) {
int j;
for (j = 0; j < i; j++) {
- if (!oidcmp(&elem->parent[i].oid,
- &elem->parent[j].oid)) {
+ if (oideq(&elem->parent[i].oid,
+ &elem->parent[j].oid)) {
reuse_combine_diff(sline, cnt, i, j);
break;
}
diff --git a/command-list.txt b/command-list.txt
index a9dda3b8af..c36ea3c182 100644
--- a/command-list.txt
+++ b/command-list.txt
@@ -123,6 +123,7 @@ git-merge-index plumbingmanipulators
git-merge-one-file purehelpers
git-mergetool ancillarymanipulators complete
git-merge-tree ancillaryinterrogators
+git-multi-pack-index plumbingmanipulators
git-mktag plumbingmanipulators
git-mktree plumbingmanipulators
git-mv mainporcelain worktree
diff --git a/commit-graph.c b/commit-graph.c
index 8a1bec7b8a..7f4519ec3b 100644
--- a/commit-graph.c
+++ b/commit-graph.c
@@ -213,8 +213,9 @@ static int prepare_commit_graph(struct repository *r)
return !!r->objects->commit_graph;
r->objects->commit_graph_attempted = 1;
- if (repo_config_get_bool(r, "core.commitgraph", &config_value) ||
- !config_value)
+ if (!git_env_bool(GIT_TEST_COMMIT_GRAPH, 0) &&
+ (repo_config_get_bool(r, "core.commitgraph", &config_value) ||
+ !config_value))
/*
* This repository is not configured to use commit graphs, so
* do not load one. (But report commit_graph_attempted anyway
@@ -233,6 +234,24 @@ static int prepare_commit_graph(struct repository *r)
return !!r->objects->commit_graph;
}
+int generation_numbers_enabled(struct repository *r)
+{
+ uint32_t first_generation;
+ struct commit_graph *g;
+ if (!prepare_commit_graph(r))
+ return 0;
+
+ g = r->objects->commit_graph;
+
+ if (!g->num_commits)
+ return 0;
+
+ first_generation = get_be32(g->chunk_commit_data +
+ g->hash_len + 8) >> 2;
+
+ return !!first_generation;
+}
+
static void close_commit_graph(void)
{
free_commit_graph(the_repository->objects->commit_graph);
@@ -765,7 +784,7 @@ void write_commit_graph(const char *obj_dir,
count_distinct = 1;
for (i = 1; i < oids.nr; i++) {
- if (oidcmp(&oids.list[i-1], &oids.list[i]))
+ if (!oideq(&oids.list[i - 1], &oids.list[i]))
count_distinct++;
}
@@ -779,7 +798,7 @@ void write_commit_graph(const char *obj_dir,
num_extra_edges = 0;
for (i = 0; i < oids.nr; i++) {
int num_parents = 0;
- if (i > 0 && !oidcmp(&oids.list[i-1], &oids.list[i]))
+ if (i > 0 && oideq(&oids.list[i - 1], &oids.list[i]))
continue;
commits.list[commits.nr] = lookup_commit(the_repository, &oids.list[i]);
@@ -900,7 +919,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g)
f = hashfd(devnull, NULL);
hashwrite(f, g->data, g->data_len - g->hash_len);
finalize_hashfile(f, checksum.hash, CSUM_CLOSE);
- if (hashcmp(checksum.hash, g->data + g->data_len - g->hash_len)) {
+ if (!hasheq(checksum.hash, g->data + g->data_len - g->hash_len)) {
graph_report(_("the commit-graph file has incorrect checksum and is likely corrupt"));
verify_commit_graph_error = VERIFY_COMMIT_GRAPH_ERROR_HASH;
}
@@ -960,7 +979,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g)
continue;
}
- if (oidcmp(&get_commit_tree_in_graph_one(g, graph_commit)->object.oid,
+ if (!oideq(&get_commit_tree_in_graph_one(g, graph_commit)->object.oid,
get_commit_tree_oid(odb_commit)))
graph_report("root tree OID for commit %s in commit-graph is %s != %s",
oid_to_hex(&cur_oid),
@@ -977,7 +996,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g)
break;
}
- if (oidcmp(&graph_parents->item->object.oid, &odb_parents->item->object.oid))
+ if (!oideq(&graph_parents->item->object.oid, &odb_parents->item->object.oid))
graph_report("commit-graph parent for %s is %s != %s",
oid_to_hex(&cur_oid),
oid_to_hex(&graph_parents->item->object.oid),
diff --git a/commit-graph.h b/commit-graph.h
index eea62f8c0e..b050476765 100644
--- a/commit-graph.h
+++ b/commit-graph.h
@@ -6,6 +6,8 @@
#include "string-list.h"
#include "cache.h"
+#define GIT_TEST_COMMIT_GRAPH "GIT_TEST_COMMIT_GRAPH"
+
struct commit;
char *get_commit_graph_filename(const char *obj_dir);
@@ -52,6 +54,12 @@ struct commit_graph {
struct commit_graph *load_commit_graph_one(const char *graph_file);
+/*
+ * Return 1 if and only if the repository has a commit-graph
+ * file and generation numbers are computed in that file.
+ */
+int generation_numbers_enabled(struct repository *r);
+
void write_commit_graph_reachable(const char *obj_dir, int append);
void write_commit_graph(const char *obj_dir,
struct string_list *pack_indexes,
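
generation_numbers_enabled() lets a caller pick a generation-aware algorithm only when the loaded commit-graph actually stores computed generation numbers; the new is_descendant_of() in commit-reach.c below branches on it in exactly this way. A hedged sketch of that pattern, assuming the commit-graph and commit-reach APIs introduced in this series; the helper name is invented:

    #include "cache.h"
    #include "repository.h"
    #include "commit.h"
    #include "commit-graph.h"
    #include "commit-reach.h"

    /* Use the generation-based walk when the data supports it. */
    static int tip_reaches_any(struct commit *tip, struct commit_list *targets)
    {
        if (generation_numbers_enabled(the_repository)) {
            struct commit_list *from = NULL;
            int ok;

            commit_list_insert(tip, &from);
            ok = can_all_from_reach(from, targets, 0);
            free_commit_list(from);
            return ok;
        }
        return is_descendant_of(tip, targets);
    }
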
diff --git a/commit-reach.c b/commit-reach.c
new file mode 100644
index 0000000000..00e5ceee6f
--- /dev/null
+++ b/commit-reach.c
@@ -0,0 +1,692 @@
+#include "cache.h"
+#include "commit.h"
+#include "commit-graph.h"
+#include "decorate.h"
+#include "prio-queue.h"
+#include "tree.h"
+#include "ref-filter.h"
+#include "revision.h"
+#include "tag.h"
+#include "commit-reach.h"
+
+/* Remember to update object flag allocation in object.h */
+#define REACHABLE (1u<<15)
+#define PARENT1 (1u<<16)
+#define PARENT2 (1u<<17)
+#define STALE (1u<<18)
+#define RESULT (1u<<19)
+
+static const unsigned all_flags = (PARENT1 | PARENT2 | STALE | RESULT);
+
+static int queue_has_nonstale(struct prio_queue *queue)
+{
+ int i;
+ for (i = 0; i < queue->nr; i++) {
+ struct commit *commit = queue->array[i].data;
+ if (!(commit->object.flags & STALE))
+ return 1;
+ }
+ return 0;
+}
+
+/* all input commits in one and twos[] must have been parsed! */
+static struct commit_list *paint_down_to_common(struct commit *one, int n,
+ struct commit **twos,
+ int min_generation)
+{
+ struct prio_queue queue = { compare_commits_by_gen_then_commit_date };
+ struct commit_list *result = NULL;
+ int i;
+ uint32_t last_gen = GENERATION_NUMBER_INFINITY;
+
+ if (!min_generation)
+ queue.compare = compare_commits_by_commit_date;
+
+ one->object.flags |= PARENT1;
+ if (!n) {
+ commit_list_append(one, &result);
+ return result;
+ }
+ prio_queue_put(&queue, one);
+
+ for (i = 0; i < n; i++) {
+ twos[i]->object.flags |= PARENT2;
+ prio_queue_put(&queue, twos[i]);
+ }
+
+ while (queue_has_nonstale(&queue)) {
+ struct commit *commit = prio_queue_get(&queue);
+ struct commit_list *parents;
+ int flags;
+
+ if (min_generation && commit->generation > last_gen)
+ BUG("bad generation skip %8x > %8x at %s",
+ commit->generation, last_gen,
+ oid_to_hex(&commit->object.oid));
+ last_gen = commit->generation;
+
+ if (commit->generation < min_generation)
+ break;
+
+ flags = commit->object.flags & (PARENT1 | PARENT2 | STALE);
+ if (flags == (PARENT1 | PARENT2)) {
+ if (!(commit->object.flags & RESULT)) {
+ commit->object.flags |= RESULT;
+ commit_list_insert_by_date(commit, &result);
+ }
+ /* Mark parents of a found merge stale */
+ flags |= STALE;
+ }
+ parents = commit->parents;
+ while (parents) {
+ struct commit *p = parents->item;
+ parents = parents->next;
+ if ((p->object.flags & flags) == flags)
+ continue;
+ if (parse_commit(p))
+ return NULL;
+ p->object.flags |= flags;
+ prio_queue_put(&queue, p);
+ }
+ }
+
+ clear_prio_queue(&queue);
+ return result;
+}
+
+static struct commit_list *merge_bases_many(struct commit *one, int n, struct commit **twos)
+{
+ struct commit_list *list = NULL;
+ struct commit_list *result = NULL;
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (one == twos[i])
+ /*
+ * We do not mark this even with RESULT so we do not
+ * have to clean it up.
+ */
+ return commit_list_insert(one, &result);
+ }
+
+ if (parse_commit(one))
+ return NULL;
+ for (i = 0; i < n; i++) {
+ if (parse_commit(twos[i]))
+ return NULL;
+ }
+
+ list = paint_down_to_common(one, n, twos, 0);
+
+ while (list) {
+ struct commit *commit = pop_commit(&list);
+ if (!(commit->object.flags & STALE))
+ commit_list_insert_by_date(commit, &result);
+ }
+ return result;
+}
+
+struct commit_list *get_octopus_merge_bases(struct commit_list *in)
+{
+ struct commit_list *i, *j, *k, *ret = NULL;
+
+ if (!in)
+ return ret;
+
+ commit_list_insert(in->item, &ret);
+
+ for (i = in->next; i; i = i->next) {
+ struct commit_list *new_commits = NULL, *end = NULL;
+
+ for (j = ret; j; j = j->next) {
+ struct commit_list *bases;
+ bases = get_merge_bases(i->item, j->item);
+ if (!new_commits)
+ new_commits = bases;
+ else
+ end->next = bases;
+ for (k = bases; k; k = k->next)
+ end = k;
+ }
+ ret = new_commits;
+ }
+ return ret;
+}
+
+static int remove_redundant(struct commit **array, int cnt)
+{
+ /*
+ * Some commit in the array may be an ancestor of
+ * another commit. Move such commit to the end of
+ * the array, and return the number of commits that
+ * are independent from each other.
+ */
+ struct commit **work;
+ unsigned char *redundant;
+ int *filled_index;
+ int i, j, filled;
+
+ work = xcalloc(cnt, sizeof(*work));
+ redundant = xcalloc(cnt, 1);
+ ALLOC_ARRAY(filled_index, cnt - 1);
+
+ for (i = 0; i < cnt; i++)
+ parse_commit(array[i]);
+ for (i = 0; i < cnt; i++) {
+ struct commit_list *common;
+ uint32_t min_generation = array[i]->generation;
+
+ if (redundant[i])
+ continue;
+ for (j = filled = 0; j < cnt; j++) {
+ if (i == j || redundant[j])
+ continue;
+ filled_index[filled] = j;
+ work[filled++] = array[j];
+
+ if (array[j]->generation < min_generation)
+ min_generation = array[j]->generation;
+ }
+ common = paint_down_to_common(array[i], filled, work,
+ min_generation);
+ if (array[i]->object.flags & PARENT2)
+ redundant[i] = 1;
+ for (j = 0; j < filled; j++)
+ if (work[j]->object.flags & PARENT1)
+ redundant[filled_index[j]] = 1;
+ clear_commit_marks(array[i], all_flags);
+ clear_commit_marks_many(filled, work, all_flags);
+ free_commit_list(common);
+ }
+
+ /* Now collect the result */
+ COPY_ARRAY(work, array, cnt);
+ for (i = filled = 0; i < cnt; i++)
+ if (!redundant[i])
+ array[filled++] = work[i];
+ for (j = filled, i = 0; i < cnt; i++)
+ if (redundant[i])
+ array[j++] = work[i];
+ free(work);
+ free(redundant);
+ free(filled_index);
+ return filled;
+}
+
+static struct commit_list *get_merge_bases_many_0(struct commit *one,
+ int n,
+ struct commit **twos,
+ int cleanup)
+{
+ struct commit_list *list;
+ struct commit **rslt;
+ struct commit_list *result;
+ int cnt, i;
+
+ result = merge_bases_many(one, n, twos);
+ for (i = 0; i < n; i++) {
+ if (one == twos[i])
+ return result;
+ }
+ if (!result || !result->next) {
+ if (cleanup) {
+ clear_commit_marks(one, all_flags);
+ clear_commit_marks_many(n, twos, all_flags);
+ }
+ return result;
+ }
+
+ /* There are more than one */
+ cnt = commit_list_count(result);
+ rslt = xcalloc(cnt, sizeof(*rslt));
+ for (list = result, i = 0; list; list = list->next)
+ rslt[i++] = list->item;
+ free_commit_list(result);
+
+ clear_commit_marks(one, all_flags);
+ clear_commit_marks_many(n, twos, all_flags);
+
+ cnt = remove_redundant(rslt, cnt);
+ result = NULL;
+ for (i = 0; i < cnt; i++)
+ commit_list_insert_by_date(rslt[i], &result);
+ free(rslt);
+ return result;
+}
+
+struct commit_list *get_merge_bases_many(struct commit *one,
+ int n,
+ struct commit **twos)
+{
+ return get_merge_bases_many_0(one, n, twos, 1);
+}
+
+struct commit_list *get_merge_bases_many_dirty(struct commit *one,
+ int n,
+ struct commit **twos)
+{
+ return get_merge_bases_many_0(one, n, twos, 0);
+}
+
+struct commit_list *get_merge_bases(struct commit *one, struct commit *two)
+{
+ return get_merge_bases_many_0(one, 1, &two, 1);
+}
+
+/*
+ * Is "commit" a descendant of one of the elements on the "with_commit" list?
+ */
+int is_descendant_of(struct commit *commit, struct commit_list *with_commit)
+{
+ if (!with_commit)
+ return 1;
+
+ if (generation_numbers_enabled(the_repository)) {
+ struct commit_list *from_list = NULL;
+ int result;
+ commit_list_insert(commit, &from_list);
+ result = can_all_from_reach(from_list, with_commit, 0);
+ free_commit_list(from_list);
+ return result;
+ } else {
+ while (with_commit) {
+ struct commit *other;
+
+ other = with_commit->item;
+ with_commit = with_commit->next;
+ if (in_merge_bases(other, commit))
+ return 1;
+ }
+ return 0;
+ }
+}
+
+/*
+ * Is "commit" an ancestor of one of the "references"?
+ */
+int in_merge_bases_many(struct commit *commit, int nr_reference, struct commit **reference)
+{
+ struct commit_list *bases;
+ int ret = 0, i;
+ uint32_t min_generation = GENERATION_NUMBER_INFINITY;
+
+ if (parse_commit(commit))
+ return ret;
+ for (i = 0; i < nr_reference; i++) {
+ if (parse_commit(reference[i]))
+ return ret;
+ if (reference[i]->generation < min_generation)
+ min_generation = reference[i]->generation;
+ }
+
+ if (commit->generation > min_generation)
+ return ret;
+
+ bases = paint_down_to_common(commit, nr_reference, reference, commit->generation);
+ if (commit->object.flags & PARENT2)
+ ret = 1;
+ clear_commit_marks(commit, all_flags);
+ clear_commit_marks_many(nr_reference, reference, all_flags);
+ free_commit_list(bases);
+ return ret;
+}
+
+/*
+ * Is "commit" an ancestor of (i.e. reachable from) the "reference"?
+ */
+int in_merge_bases(struct commit *commit, struct commit *reference)
+{
+ return in_merge_bases_many(commit, 1, &reference);
+}
+
+struct commit_list *reduce_heads(struct commit_list *heads)
+{
+ struct commit_list *p;
+ struct commit_list *result = NULL, **tail = &result;
+ struct commit **array;
+ int num_head, i;
+
+ if (!heads)
+ return NULL;
+
+ /* Uniquify */
+ for (p = heads; p; p = p->next)
+ p->item->object.flags &= ~STALE;
+ for (p = heads, num_head = 0; p; p = p->next) {
+ if (p->item->object.flags & STALE)
+ continue;
+ p->item->object.flags |= STALE;
+ num_head++;
+ }
+ array = xcalloc(num_head, sizeof(*array));
+ for (p = heads, i = 0; p; p = p->next) {
+ if (p->item->object.flags & STALE) {
+ array[i++] = p->item;
+ p->item->object.flags &= ~STALE;
+ }
+ }
+ num_head = remove_redundant(array, num_head);
+ for (i = 0; i < num_head; i++)
+ tail = &commit_list_insert(array[i], tail)->next;
+ free(array);
+ return result;
+}
+
+void reduce_heads_replace(struct commit_list **heads)
+{
+ struct commit_list *result = reduce_heads(*heads);
+ free_commit_list(*heads);
+ *heads = result;
+}
+
+int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid)
+{
+ struct object *o;
+ struct commit *old_commit, *new_commit;
+ struct commit_list *old_commit_list = NULL;
+
+ /*
+ * Both new_commit and old_commit must be commit-ish and new_commit is descendant of
+ * old_commit. Otherwise we require --force.
+ */
+ o = deref_tag(the_repository, parse_object(the_repository, old_oid),
+ NULL, 0);
+ if (!o || o->type != OBJ_COMMIT)
+ return 0;
+ old_commit = (struct commit *) o;
+
+ o = deref_tag(the_repository, parse_object(the_repository, new_oid),
+ NULL, 0);
+ if (!o || o->type != OBJ_COMMIT)
+ return 0;
+ new_commit = (struct commit *) o;
+
+ if (parse_commit(new_commit) < 0)
+ return 0;
+
+ commit_list_insert(old_commit, &old_commit_list);
+ return is_descendant_of(new_commit, old_commit_list);
+}
+
+/*
+ * Mimicking the real stack, this stack lives on the heap, avoiding stack
+ * overflows.
+ *
+ * At each recursion step, the stack items point to the commits whose
+ * ancestors are to be inspected.
+ */
+struct contains_stack {
+ int nr, alloc;
+ struct contains_stack_entry {
+ struct commit *commit;
+ struct commit_list *parents;
+ } *contains_stack;
+};
+
+static int in_commit_list(const struct commit_list *want, struct commit *c)
+{
+ for (; want; want = want->next)
+ if (!oidcmp(&want->item->object.oid, &c->object.oid))
+ return 1;
+ return 0;
+}
+
+/*
+ * Test whether the candidate is contained in the list.
+ * Do not recurse to find out, though, but return -1 if inconclusive.
+ */
+static enum contains_result contains_test(struct commit *candidate,
+ const struct commit_list *want,
+ struct contains_cache *cache,
+ uint32_t cutoff)
+{
+ enum contains_result *cached = contains_cache_at(cache, candidate);
+
+ /* If we already have the answer cached, return that. */
+ if (*cached)
+ return *cached;
+
+ /* or are we it? */
+ if (in_commit_list(want, candidate)) {
+ *cached = CONTAINS_YES;
+ return CONTAINS_YES;
+ }
+
+ /* Otherwise, we don't know; prepare to recurse */
+ parse_commit_or_die(candidate);
+
+ if (candidate->generation < cutoff)
+ return CONTAINS_NO;
+
+ return CONTAINS_UNKNOWN;
+}
+
+static void push_to_contains_stack(struct commit *candidate, struct contains_stack *contains_stack)
+{
+ ALLOC_GROW(contains_stack->contains_stack, contains_stack->nr + 1, contains_stack->alloc);
+ contains_stack->contains_stack[contains_stack->nr].commit = candidate;
+ contains_stack->contains_stack[contains_stack->nr++].parents = candidate->parents;
+}
+
+static enum contains_result contains_tag_algo(struct commit *candidate,
+ const struct commit_list *want,
+ struct contains_cache *cache)
+{
+ struct contains_stack contains_stack = { 0, 0, NULL };
+ enum contains_result result;
+ uint32_t cutoff = GENERATION_NUMBER_INFINITY;
+ const struct commit_list *p;
+
+ for (p = want; p; p = p->next) {
+ struct commit *c = p->item;
+ load_commit_graph_info(the_repository, c);
+ if (c->generation < cutoff)
+ cutoff = c->generation;
+ }
+
+ result = contains_test(candidate, want, cache, cutoff);
+ if (result != CONTAINS_UNKNOWN)
+ return result;
+
+ push_to_contains_stack(candidate, &contains_stack);
+ while (contains_stack.nr) {
+ struct contains_stack_entry *entry = &contains_stack.contains_stack[contains_stack.nr - 1];
+ struct commit *commit = entry->commit;
+ struct commit_list *parents = entry->parents;
+
+ if (!parents) {
+ *contains_cache_at(cache, commit) = CONTAINS_NO;
+ contains_stack.nr--;
+ }
+ /*
+ * If we just popped the stack, parents->item has been marked,
+ * therefore contains_test will return a meaningful yes/no.
+ */
+ else switch (contains_test(parents->item, want, cache, cutoff)) {
+ case CONTAINS_YES:
+ *contains_cache_at(cache, commit) = CONTAINS_YES;
+ contains_stack.nr--;
+ break;
+ case CONTAINS_NO:
+ entry->parents = parents->next;
+ break;
+ case CONTAINS_UNKNOWN:
+ push_to_contains_stack(parents->item, &contains_stack);
+ break;
+ }
+ }
+ free(contains_stack.contains_stack);
+ return contains_test(candidate, want, cache, cutoff);
+}
+
+int commit_contains(struct ref_filter *filter, struct commit *commit,
+ struct commit_list *list, struct contains_cache *cache)
+{
+ if (filter->with_commit_tag_algo)
+ return contains_tag_algo(commit, list, cache) == CONTAINS_YES;
+ return is_descendant_of(commit, list);
+}
+
+static int compare_commits_by_gen(const void *_a, const void *_b)
+{
+ const struct commit *a = (const struct commit *)_a;
+ const struct commit *b = (const struct commit *)_b;
+
+ if (a->generation < b->generation)
+ return -1;
+ if (a->generation > b->generation)
+ return 1;
+ return 0;
+}
+
+int can_all_from_reach_with_flag(struct object_array *from,
+ unsigned int with_flag,
+ unsigned int assign_flag,
+ time_t min_commit_date,
+ uint32_t min_generation)
+{
+ struct commit **list = NULL;
+ int i;
+ int nr_commits;
+ int result = 1;
+
+ ALLOC_ARRAY(list, from->nr);
+ nr_commits = 0;
+ for (i = 0; i < from->nr; i++) {
+ struct object *from_one = from->objects[i].item;
+
+ if (!from_one || from_one->flags & assign_flag)
+ continue;
+
+ from_one = deref_tag(the_repository, from_one,
+ "a from object", 0);
+ if (!from_one || from_one->type != OBJ_COMMIT) {
+ /* no way to tell if this is reachable by
+ * looking at the ancestry chain alone, so
+ * leave a note to ourselves not to worry about
+ * this object anymore.
+ */
+ from->objects[i].item->flags |= assign_flag;
+ continue;
+ }
+
+ list[nr_commits] = (struct commit *)from_one;
+ if (parse_commit(list[nr_commits]) ||
+ list[nr_commits]->generation < min_generation) {
+ result = 0;
+ goto cleanup;
+ }
+
+ nr_commits++;
+ }
+
+ QSORT(list, nr_commits, compare_commits_by_gen);
+
+ for (i = 0; i < nr_commits; i++) {
+ /* DFS from list[i] */
+ struct commit_list *stack = NULL;
+
+ list[i]->object.flags |= assign_flag;
+ commit_list_insert(list[i], &stack);
+
+ while (stack) {
+ struct commit_list *parent;
+
+ if (stack->item->object.flags & with_flag) {
+ pop_commit(&stack);
+ continue;
+ }
+
+ for (parent = stack->item->parents; parent; parent = parent->next) {
+ if (parent->item->object.flags & (with_flag | RESULT))
+ stack->item->object.flags |= RESULT;
+
+ if (!(parent->item->object.flags & assign_flag)) {
+ parent->item->object.flags |= assign_flag;
+
+ if (parse_commit(parent->item) ||
+ parent->item->date < min_commit_date ||
+ parent->item->generation < min_generation)
+ continue;
+
+ commit_list_insert(parent->item, &stack);
+ break;
+ }
+ }
+
+ if (!parent)
+ pop_commit(&stack);
+ }
+
+ if (!(list[i]->object.flags & (with_flag | RESULT))) {
+ result = 0;
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ for (i = 0; i < nr_commits; i++) {
+ clear_commit_marks(list[i], RESULT);
+ clear_commit_marks(list[i], assign_flag);
+ }
+ free(list);
+
+ for (i = 0; i < from->nr; i++)
+ from->objects[i].item->flags &= ~assign_flag;
+
+ return result;
+}
+
+int can_all_from_reach(struct commit_list *from, struct commit_list *to,
+ int cutoff_by_min_date)
+{
+ struct object_array from_objs = OBJECT_ARRAY_INIT;
+ time_t min_commit_date = cutoff_by_min_date ? from->item->date : 0;
+ struct commit_list *from_iter = from, *to_iter = to;
+ int result;
+ uint32_t min_generation = GENERATION_NUMBER_INFINITY;
+
+ while (from_iter) {
+ add_object_array(&from_iter->item->object, NULL, &from_objs);
+
+ if (!parse_commit(from_iter->item)) {
+ if (from_iter->item->date < min_commit_date)
+ min_commit_date = from_iter->item->date;
+
+ if (from_iter->item->generation < min_generation)
+ min_generation = from_iter->item->generation;
+ }
+
+ from_iter = from_iter->next;
+ }
+
+ while (to_iter) {
+ if (!parse_commit(to_iter->item)) {
+ if (to_iter->item->date < min_commit_date)
+ min_commit_date = to_iter->item->date;
+
+ if (to_iter->item->generation < min_generation)
+ min_generation = to_iter->item->generation;
+ }
+
+ to_iter->item->object.flags |= PARENT2;
+
+ to_iter = to_iter->next;
+ }
+
+ result = can_all_from_reach_with_flag(&from_objs, PARENT2, PARENT1,
+ min_commit_date, min_generation);
+
+ while (from) {
+ clear_commit_marks(from->item, PARENT1);
+ from = from->next;
+ }
+
+ while (to) {
+ clear_commit_marks(to->item, PARENT2);
+ to = to->next;
+ }
+
+ object_array_clear(&from_objs);
+ return result;
+}
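
All of this code is moved out of commit.c (see the removals at the end of this patch) so that callers reach it through the new commit-reach.h; that is why builtin/pull.c, receive-pack.c, remote.c and rev-parse.c gain #include "commit-reach.h" above. A minimal, hedged sketch of a caller of the moved API; the helper names are invented for illustration:

    #include "cache.h"
    #include "commit.h"
    #include "commit-reach.h"

    /* Is 'ours' already contained in (an ancestor of) 'theirs'? */
    static int already_merged(struct commit *ours, struct commit *theirs)
    {
        return in_merge_bases(ours, theirs);
    }

    /* Print the merge base(s) of two commits. */
    static void show_merge_bases(struct commit *a, struct commit *b)
    {
        struct commit_list *bases = get_merge_bases(a, b);
        struct commit_list *p;

        for (p = bases; p; p = p->next)
            printf("%s\n", oid_to_hex(&p->item->object.oid));
        free_commit_list(bases);
    }
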
diff --git a/commit-reach.h b/commit-reach.h
new file mode 100644
index 0000000000..7d313e2975
--- /dev/null
+++ b/commit-reach.h
@@ -0,0 +1,77 @@
+#ifndef __COMMIT_REACH_H__
+#define __COMMIT_REACH_H__
+
+#include "commit-slab.h"
+
+struct commit;
+struct commit_list;
+struct contains_cache;
+struct ref_filter;
+
+struct commit_list *get_merge_bases_many(struct commit *one,
+ int n,
+ struct commit **twos);
+struct commit_list *get_merge_bases_many_dirty(struct commit *one,
+ int n,
+ struct commit **twos);
+struct commit_list *get_merge_bases(struct commit *one, struct commit *two);
+struct commit_list *get_octopus_merge_bases(struct commit_list *in);
+
+/* To be used only when object flags after this call no longer matter */
+struct commit_list *get_merge_bases_many_dirty(struct commit *one, int n, struct commit **twos);
+
+int is_descendant_of(struct commit *commit, struct commit_list *with_commit);
+int in_merge_bases_many(struct commit *commit, int nr_reference, struct commit **reference);
+int in_merge_bases(struct commit *commit, struct commit *reference);
+
+/*
+ * Takes a list of commits and returns a new list where those
+ * have been removed that can be reached from other commits in
+ * the list. It is useful for, e.g., reducing the commits
+ * randomly thrown at the git-merge command and removing
+ * redundant commits that the user shouldn't have given to it.
+ *
+ * This function destroys the STALE bit of the commit objects'
+ * flags.
+ */
+struct commit_list *reduce_heads(struct commit_list *heads);
+
+/*
+ * Like `reduce_heads()`, except it replaces the list. Use this
+ * instead of `foo = reduce_heads(foo);` to avoid memory leaks.
+ */
+void reduce_heads_replace(struct commit_list **heads);
+
+int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid);
+
+/*
+ * Unknown has to be "0" here, because that's the default value for
+ * contains_cache slab entries that have not yet been assigned.
+ */
+enum contains_result {
+ CONTAINS_UNKNOWN = 0,
+ CONTAINS_NO,
+ CONTAINS_YES
+};
+
+define_commit_slab(contains_cache, enum contains_result);
+
+int commit_contains(struct ref_filter *filter, struct commit *commit,
+ struct commit_list *list, struct contains_cache *cache);
+
+/*
+ * Determine if every commit in 'from' can reach at least one commit
+ * that is marked with 'with_flag'. As we traverse, use 'assign_flag'
+ * as a marker for commits that are already visited. Do not walk
+ * commits with date below 'min_commit_date' or generation below
+ * 'min_generation'.
+ */
+int can_all_from_reach_with_flag(struct object_array *from,
+ unsigned int with_flag,
+ unsigned int assign_flag,
+ time_t min_commit_date,
+ uint32_t min_generation);
+int can_all_from_reach(struct commit_list *from, struct commit_list *to,
+ int commit_date_cutoff);
+
+#endif
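
The header above gathers the reachability queries that the rest of this series moves out of commit.c. For orientation, here is a minimal sketch of a caller — not part of the patch; the helper name is_ancestor() is invented, and it assumes compilation inside git's own tree so that get_oid() and lookup_commit_reference() are available:

    #include "cache.h"
    #include "repository.h"
    #include "commit.h"
    #include "commit-reach.h"

    /* Hypothetical helper: is "base" an ancestor of "tip"? */
    static int is_ancestor(const char *base, const char *tip)
    {
        struct object_id b_oid, t_oid;
        struct commit *b, *t;

        if (get_oid(base, &b_oid) || get_oid(tip, &t_oid))
            die("not a valid object name");
        b = lookup_commit_reference(the_repository, &b_oid);
        t = lookup_commit_reference(the_repository, &t_oid);
        if (!b || !t)
            die("not a commit");
        /* in_merge_bases(A, B): is A an ancestor of (reachable from) B? */
        return in_merge_bases(b, t);
    }

can_all_from_reach_with_flag(), documented just above, is the batched variant: it answers the same question for a whole set of tips at once, pruning the walk by commit date and generation number.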
diff --git a/commit.c b/commit.c
index 449c1f4920..d0f199e122 100644
--- a/commit.c
+++ b/commit.c
@@ -46,7 +46,7 @@ struct commit *lookup_commit_or_die(const struct object_id *oid, const char *ref
struct commit *c = lookup_commit_reference(the_repository, oid);
if (!c)
die(_("could not parse %s"), ref_name);
- if (oidcmp(oid, &c->object.oid)) {
+ if (!oideq(oid, &c->object.oid)) {
warning(_("%s %s is not a commit!"),
ref_name, oid_to_hex(oid));
}
@@ -843,367 +843,6 @@ void sort_in_topological_order(struct commit_list **list, enum rev_sort_order so
clear_author_date_slab(&author_date);
}
-/* merge-base stuff */
-
-/* Remember to update object flag allocation in object.h */
-#define PARENT1 (1u<<16)
-#define PARENT2 (1u<<17)
-#define STALE (1u<<18)
-#define RESULT (1u<<19)
-
-static const unsigned all_flags = (PARENT1 | PARENT2 | STALE | RESULT);
-
-static int queue_has_nonstale(struct prio_queue *queue)
-{
- int i;
- for (i = 0; i < queue->nr; i++) {
- struct commit *commit = queue->array[i].data;
- if (!(commit->object.flags & STALE))
- return 1;
- }
- return 0;
-}
-
-/* all input commits in one and twos[] must have been parsed! */
-static struct commit_list *paint_down_to_common(struct commit *one, int n,
- struct commit **twos,
- int min_generation)
-{
- struct prio_queue queue = { compare_commits_by_gen_then_commit_date };
- struct commit_list *result = NULL;
- int i;
- uint32_t last_gen = GENERATION_NUMBER_INFINITY;
-
- if (!min_generation)
- queue.compare = compare_commits_by_commit_date;
-
- one->object.flags |= PARENT1;
- if (!n) {
- commit_list_append(one, &result);
- return result;
- }
- prio_queue_put(&queue, one);
-
- for (i = 0; i < n; i++) {
- twos[i]->object.flags |= PARENT2;
- prio_queue_put(&queue, twos[i]);
- }
-
- while (queue_has_nonstale(&queue)) {
- struct commit *commit = prio_queue_get(&queue);
- struct commit_list *parents;
- int flags;
-
- if (min_generation && commit->generation > last_gen)
- BUG("bad generation skip %8x > %8x at %s",
- commit->generation, last_gen,
- oid_to_hex(&commit->object.oid));
- last_gen = commit->generation;
-
- if (commit->generation < min_generation)
- break;
-
- flags = commit->object.flags & (PARENT1 | PARENT2 | STALE);
- if (flags == (PARENT1 | PARENT2)) {
- if (!(commit->object.flags & RESULT)) {
- commit->object.flags |= RESULT;
- commit_list_insert_by_date(commit, &result);
- }
- /* Mark parents of a found merge stale */
- flags |= STALE;
- }
- parents = commit->parents;
- while (parents) {
- struct commit *p = parents->item;
- parents = parents->next;
- if ((p->object.flags & flags) == flags)
- continue;
- if (parse_commit(p))
- return NULL;
- p->object.flags |= flags;
- prio_queue_put(&queue, p);
- }
- }
-
- clear_prio_queue(&queue);
- return result;
-}
-
-static struct commit_list *merge_bases_many(struct commit *one, int n, struct commit **twos)
-{
- struct commit_list *list = NULL;
- struct commit_list *result = NULL;
- int i;
-
- for (i = 0; i < n; i++) {
- if (one == twos[i])
- /*
- * We do not mark this even with RESULT so we do not
- * have to clean it up.
- */
- return commit_list_insert(one, &result);
- }
-
- if (parse_commit(one))
- return NULL;
- for (i = 0; i < n; i++) {
- if (parse_commit(twos[i]))
- return NULL;
- }
-
- list = paint_down_to_common(one, n, twos, 0);
-
- while (list) {
- struct commit *commit = pop_commit(&list);
- if (!(commit->object.flags & STALE))
- commit_list_insert_by_date(commit, &result);
- }
- return result;
-}
-
-struct commit_list *get_octopus_merge_bases(struct commit_list *in)
-{
- struct commit_list *i, *j, *k, *ret = NULL;
-
- if (!in)
- return ret;
-
- commit_list_insert(in->item, &ret);
-
- for (i = in->next; i; i = i->next) {
- struct commit_list *new_commits = NULL, *end = NULL;
-
- for (j = ret; j; j = j->next) {
- struct commit_list *bases;
- bases = get_merge_bases(i->item, j->item);
- if (!new_commits)
- new_commits = bases;
- else
- end->next = bases;
- for (k = bases; k; k = k->next)
- end = k;
- }
- ret = new_commits;
- }
- return ret;
-}
-
-static int remove_redundant(struct commit **array, int cnt)
-{
- /*
- * Some commit in the array may be an ancestor of
- * another commit. Move such commit to the end of
- * the array, and return the number of commits that
- * are independent from each other.
- */
- struct commit **work;
- unsigned char *redundant;
- int *filled_index;
- int i, j, filled;
-
- work = xcalloc(cnt, sizeof(*work));
- redundant = xcalloc(cnt, 1);
- ALLOC_ARRAY(filled_index, cnt - 1);
-
- for (i = 0; i < cnt; i++)
- parse_commit(array[i]);
- for (i = 0; i < cnt; i++) {
- struct commit_list *common;
- uint32_t min_generation = array[i]->generation;
-
- if (redundant[i])
- continue;
- for (j = filled = 0; j < cnt; j++) {
- if (i == j || redundant[j])
- continue;
- filled_index[filled] = j;
- work[filled++] = array[j];
-
- if (array[j]->generation < min_generation)
- min_generation = array[j]->generation;
- }
- common = paint_down_to_common(array[i], filled, work,
- min_generation);
- if (array[i]->object.flags & PARENT2)
- redundant[i] = 1;
- for (j = 0; j < filled; j++)
- if (work[j]->object.flags & PARENT1)
- redundant[filled_index[j]] = 1;
- clear_commit_marks(array[i], all_flags);
- clear_commit_marks_many(filled, work, all_flags);
- free_commit_list(common);
- }
-
- /* Now collect the result */
- COPY_ARRAY(work, array, cnt);
- for (i = filled = 0; i < cnt; i++)
- if (!redundant[i])
- array[filled++] = work[i];
- for (j = filled, i = 0; i < cnt; i++)
- if (redundant[i])
- array[j++] = work[i];
- free(work);
- free(redundant);
- free(filled_index);
- return filled;
-}
-
-static struct commit_list *get_merge_bases_many_0(struct commit *one,
- int n,
- struct commit **twos,
- int cleanup)
-{
- struct commit_list *list;
- struct commit **rslt;
- struct commit_list *result;
- int cnt, i;
-
- result = merge_bases_many(one, n, twos);
- for (i = 0; i < n; i++) {
- if (one == twos[i])
- return result;
- }
- if (!result || !result->next) {
- if (cleanup) {
- clear_commit_marks(one, all_flags);
- clear_commit_marks_many(n, twos, all_flags);
- }
- return result;
- }
-
- /* There are more than one */
- cnt = commit_list_count(result);
- rslt = xcalloc(cnt, sizeof(*rslt));
- for (list = result, i = 0; list; list = list->next)
- rslt[i++] = list->item;
- free_commit_list(result);
-
- clear_commit_marks(one, all_flags);
- clear_commit_marks_many(n, twos, all_flags);
-
- cnt = remove_redundant(rslt, cnt);
- result = NULL;
- for (i = 0; i < cnt; i++)
- commit_list_insert_by_date(rslt[i], &result);
- free(rslt);
- return result;
-}
-
-struct commit_list *get_merge_bases_many(struct commit *one,
- int n,
- struct commit **twos)
-{
- return get_merge_bases_many_0(one, n, twos, 1);
-}
-
-struct commit_list *get_merge_bases_many_dirty(struct commit *one,
- int n,
- struct commit **twos)
-{
- return get_merge_bases_many_0(one, n, twos, 0);
-}
-
-struct commit_list *get_merge_bases(struct commit *one, struct commit *two)
-{
- return get_merge_bases_many_0(one, 1, &two, 1);
-}
-
-/*
- * Is "commit" a descendant of one of the elements on the "with_commit" list?
- */
-int is_descendant_of(struct commit *commit, struct commit_list *with_commit)
-{
- if (!with_commit)
- return 1;
- while (with_commit) {
- struct commit *other;
-
- other = with_commit->item;
- with_commit = with_commit->next;
- if (in_merge_bases(other, commit))
- return 1;
- }
- return 0;
-}
-
-/*
- * Is "commit" an ancestor of one of the "references"?
- */
-int in_merge_bases_many(struct commit *commit, int nr_reference, struct commit **reference)
-{
- struct commit_list *bases;
- int ret = 0, i;
- uint32_t min_generation = GENERATION_NUMBER_INFINITY;
-
- if (parse_commit(commit))
- return ret;
- for (i = 0; i < nr_reference; i++) {
- if (parse_commit(reference[i]))
- return ret;
- if (reference[i]->generation < min_generation)
- min_generation = reference[i]->generation;
- }
-
- if (commit->generation > min_generation)
- return ret;
-
- bases = paint_down_to_common(commit, nr_reference, reference, commit->generation);
- if (commit->object.flags & PARENT2)
- ret = 1;
- clear_commit_marks(commit, all_flags);
- clear_commit_marks_many(nr_reference, reference, all_flags);
- free_commit_list(bases);
- return ret;
-}
-
-/*
- * Is "commit" an ancestor of (i.e. reachable from) the "reference"?
- */
-int in_merge_bases(struct commit *commit, struct commit *reference)
-{
- return in_merge_bases_many(commit, 1, &reference);
-}
-
-struct commit_list *reduce_heads(struct commit_list *heads)
-{
- struct commit_list *p;
- struct commit_list *result = NULL, **tail = &result;
- struct commit **array;
- int num_head, i;
-
- if (!heads)
- return NULL;
-
- /* Uniquify */
- for (p = heads; p; p = p->next)
- p->item->object.flags &= ~STALE;
- for (p = heads, num_head = 0; p; p = p->next) {
- if (p->item->object.flags & STALE)
- continue;
- p->item->object.flags |= STALE;
- num_head++;
- }
- array = xcalloc(num_head, sizeof(*array));
- for (p = heads, i = 0; p; p = p->next) {
- if (p->item->object.flags & STALE) {
- array[i++] = p->item;
- p->item->object.flags &= ~STALE;
- }
- }
- num_head = remove_redundant(array, num_head);
- for (i = 0; i < num_head; i++)
- tail = &commit_list_insert(array[i], tail)->next;
- free(array);
- return result;
-}
-
-void reduce_heads_replace(struct commit_list **heads)
-{
- struct commit_list *result = reduce_heads(*heads);
- free_commit_list(*heads);
- *heads = result;
-}
-
static const char gpg_sig_header[] = "gpgsig";
static const int gpg_sig_header_len = sizeof(gpg_sig_header) - 1;
@@ -1787,10 +1426,10 @@ const char *find_commit_header(const char *msg, const char *key, size_t *out_len
* Returns the number of bytes from the tail to ignore, to be fed as
* the second parameter to append_signoff().
*/
-int ignore_non_trailer(const char *buf, size_t len)
+size_t ignore_non_trailer(const char *buf, size_t len)
{
- int boc = 0;
- int bol = 0;
+ size_t boc = 0;
+ size_t bol = 0;
int in_old_conflicts_block = 0;
size_t cutoff = wt_status_locate_end(buf, len);
diff --git a/commit.h b/commit.h
index da0db36eba..2b1a734388 100644
--- a/commit.h
+++ b/commit.h
@@ -204,13 +204,6 @@ struct commit_graft *read_graft_line(struct strbuf *line);
int register_commit_graft(struct repository *r, struct commit_graft *, int);
struct commit_graft *lookup_commit_graft(struct repository *r, const struct object_id *oid);
-extern struct commit_list *get_merge_bases(struct commit *rev1, struct commit *rev2);
-extern struct commit_list *get_merge_bases_many(struct commit *one, int n, struct commit **twos);
-extern struct commit_list *get_octopus_merge_bases(struct commit_list *in);
-
-/* To be used only when object flags after this call no longer matter */
-extern struct commit_list *get_merge_bases_many_dirty(struct commit *one, int n, struct commit **twos);
-
/* largest positive number a signed 32-bit integer can contain */
#define INFINITE_DEPTH 0x7fffffff
@@ -258,32 +251,10 @@ extern int delayed_reachability_test(struct shallow_info *si, int c);
extern void prune_shallow(int show_only);
extern struct trace_key trace_shallow;
-int is_descendant_of(struct commit *, struct commit_list *);
-int in_merge_bases(struct commit *, struct commit *);
-int in_merge_bases_many(struct commit *, int, struct commit **);
-
extern int interactive_add(int argc, const char **argv, const char *prefix, int patch);
extern int run_add_interactive(const char *revision, const char *patch_mode,
const struct pathspec *pathspec);
-/*
- * Takes a list of commits and returns a new list where those
- * have been removed that can be reached from other commits in
- * the list. It is useful for, e.g., reducing the commits
- * randomly thrown at the git-merge command and removing
- * redundant commits that the user shouldn't have given to it.
- *
- * This function destroys the STALE bit of the commit objects'
- * flags.
- */
-extern struct commit_list *reduce_heads(struct commit_list *heads);
-
-/*
- * Like `reduce_heads()`, except it replaces the list. Use this
- * instead of `foo = reduce_heads(foo);` to avoid memory leaks.
- */
-extern void reduce_heads_replace(struct commit_list **heads);
-
struct commit_extra_header {
struct commit_extra_header *next;
char *key;
@@ -322,7 +293,7 @@ extern const char *find_commit_header(const char *msg, const char *key,
size_t *out_len);
/* Find the end of the log message, the right place for a new trailer. */
-extern int ignore_non_trailer(const char *buf, size_t len);
+extern size_t ignore_non_trailer(const char *buf, size_t len);
typedef int (*each_mergetag_fn)(struct commit *commit, struct commit_extra_header *extra,
void *cb_data);
diff --git a/compat/mingw.c b/compat/mingw.c
index 858ca14a57..18caf21969 100644
--- a/compat/mingw.c
+++ b/compat/mingw.c
@@ -341,6 +341,19 @@ int mingw_mkdir(const char *path, int mode)
return ret;
}
+/*
+ * Calling CreateFile() using FILE_APPEND_DATA and without FILE_WRITE_DATA
+ * is documented in [1] as opening a writable file handle in append mode.
+ * (It is believed that) this is atomic since it is maintained by the
+ * kernel unlike the O_APPEND flag which is racily maintained by the CRT.
+ *
+ * [1] https://docs.microsoft.com/en-us/windows/desktop/fileio/file-access-rights-constants
+ *
+ * This trick does not appear to work for named pipes. Instead it creates
+ * a named pipe client handle that cannot be written to. Callers should
+ * just use the regular _wopen() for them. (And since client handle gets
+ * bound to a unique server handle, it isn't really an issue.)
+ */
static int mingw_open_append(wchar_t const *wfilename, int oflags, ...)
{
HANDLE handle;
@@ -360,10 +373,12 @@ static int mingw_open_append(wchar_t const *wfilename, int oflags, ...)
NULL, create, FILE_ATTRIBUTE_NORMAL, NULL);
if (handle == INVALID_HANDLE_VALUE)
return errno = err_win_to_posix(GetLastError()), -1;
+
/*
* No O_APPEND here, because the CRT uses it only to reset the
- * file pointer to EOF on write(); but that is not necessary
- * for a file created with FILE_APPEND_DATA.
+ * file pointer to EOF before each write(); but that is not
+ * necessary (and may lead to races) for a file created with
+ * FILE_APPEND_DATA.
*/
fd = _open_osfhandle((intptr_t)handle, O_BINARY);
if (fd < 0)
@@ -371,6 +386,21 @@ static int mingw_open_append(wchar_t const *wfilename, int oflags, ...)
return fd;
}
+/*
+ * Does the pathname map to the local named pipe filesystem?
+ * That is, does it have a "//./pipe/" prefix?
+ */
+static int is_local_named_pipe_path(const char *filename)
+{
+ return (is_dir_sep(filename[0]) &&
+ is_dir_sep(filename[1]) &&
+ filename[2] == '.' &&
+ is_dir_sep(filename[3]) &&
+ !strncasecmp(filename+4, "pipe", 4) &&
+ is_dir_sep(filename[8]) &&
+ filename[9]);
+}
+
int mingw_open (const char *filename, int oflags, ...)
{
typedef int (*open_fn_t)(wchar_t const *wfilename, int oflags, ...);
@@ -387,7 +417,7 @@ int mingw_open (const char *filename, int oflags, ...)
if (filename && !strcmp(filename, "/dev/null"))
filename = "nul";
- if (oflags & O_APPEND)
+ if ((oflags & O_APPEND) && !is_local_named_pipe_path(filename))
open_fn = mingw_open_append;
else
open_fn = _wopen;
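
The two additions work together: append opens go through a kernel-maintained FILE_APPEND_DATA handle (atomic, unlike the CRT's O_APPEND emulation), except for local named pipes, which are detected by prefix and fall back to _wopen(). A standalone sketch of that prefix test follows; the local is_dir_sep() stand-in mirrors git's (both '/' and '\\'), and strncasecmp() is the POSIX spelling (MSVC calls it _strnicmp()):

    #include <stdio.h>
    #include <strings.h>

    static int is_dir_sep(char c) { return c == '/' || c == '\\'; }

    /* Same shape as the helper added above: does the path start with "//./pipe/"? */
    static int is_local_named_pipe_path(const char *filename)
    {
        return (is_dir_sep(filename[0]) &&
                is_dir_sep(filename[1]) &&
                filename[2] == '.' &&
                is_dir_sep(filename[3]) &&
                !strncasecmp(filename + 4, "pipe", 4) &&
                is_dir_sep(filename[8]) &&
                filename[9]);
    }

    int main(void)
    {
        printf("%d\n", is_local_named_pipe_path("//./pipe/my-pipe"));  /* 1 */
        printf("%d\n", is_local_named_pipe_path("C:/repo/file.txt"));  /* 0 */
        return 0;
    }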
diff --git a/config.mak.dev b/config.mak.dev
index 9a998149d9..92d268137f 100644
--- a/config.mak.dev
+++ b/config.mak.dev
@@ -7,6 +7,7 @@ CFLAGS += -pedantic
CFLAGS += -DUSE_PARENS_AROUND_GETTEXT_N=0
endif
CFLAGS += -Wdeclaration-after-statement
+CFLAGS += -Wformat-security
CFLAGS += -Wno-format-zero-length
CFLAGS += -Wold-style-definition
CFLAGS += -Woverflow
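
-Wformat-security (effective together with -Wformat, which -Wall already enables) warns when untrusted data is passed as the format string itself. A tiny illustration, not from the patch:

    #include <stdio.h>

    static void report(const char *msg)
    {
        printf(msg);        /* warned: format not a string literal and no format arguments */
        printf("%s", msg);  /* fine */
    }

    int main(void)
    {
        report("hello\n");
        return 0;
    }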
diff --git a/connect.c b/connect.c
index 94547e5056..24281b6082 100644
--- a/connect.c
+++ b/connect.c
@@ -224,7 +224,7 @@ static int process_dummy_ref(const char *line)
return 0;
name++;
- return !oidcmp(&null_oid, &oid) && !strcmp(name, "capabilities^{}");
+ return oideq(&null_oid, &oid) && !strcmp(name, "capabilities^{}");
}
static void check_no_capabilities(const char *line, int len)
diff --git a/contrib/coccinelle/commit.cocci b/contrib/coccinelle/commit.cocci
index aec3345adb..c49aa558f0 100644
--- a/contrib/coccinelle/commit.cocci
+++ b/contrib/coccinelle/commit.cocci
@@ -15,10 +15,10 @@ expression c;
identifier f !~ "^(get_commit_tree|get_commit_tree_in_graph_one|load_tree_for_commit)$";
expression c;
@@
- f(...) {...
+ f(...) {<...
- c->maybe_tree
+ get_commit_tree(c)
- ...}
+ ...>}
@@
expression c;
diff --git a/contrib/coccinelle/object_id.cocci b/contrib/coccinelle/object_id.cocci
index 09afdbf994..d8bdb48712 100644
--- a/contrib/coccinelle/object_id.cocci
+++ b/contrib/coccinelle/object_id.cocci
@@ -20,10 +20,10 @@ expression E1;
identifier f != oid_to_hex;
expression E1;
@@
- f(...) {...
+ f(...) {<...
- sha1_to_hex(E1->hash)
+ oid_to_hex(E1)
- ...}
+ ...>}
@@
expression E1, E2;
@@ -35,10 +35,10 @@ expression E1, E2;
identifier f != oid_to_hex_r;
expression E1, E2;
@@
- f(...) {...
+ f(...) {<...
- sha1_to_hex_r(E1, E2->hash)
+ oid_to_hex_r(E1, E2)
- ...}
+ ...>}
@@
expression E1;
@@ -50,10 +50,10 @@ expression E1;
identifier f != oidclr;
expression E1;
@@
- f(...) {...
+ f(...) {<...
- hashclr(E1->hash)
+ oidclr(E1)
- ...}
+ ...>}
@@
expression E1, E2;
@@ -65,10 +65,10 @@ expression E1, E2;
identifier f != oidcmp;
expression E1, E2;
@@
- f(...) {...
+ f(...) {<...
- hashcmp(E1->hash, E2->hash)
+ oidcmp(E1, E2)
- ...}
+ ...>}
@@
expression E1, E2;
@@ -92,10 +92,10 @@ expression E1, E2;
identifier f != oidcpy;
expression E1, E2;
@@
- f(...) {...
+ f(...) {<...
- hashcpy(E1->hash, E2->hash)
+ oidcpy(E1, E2)
- ...}
+ ...>}
@@
expression E1, E2;
@@ -108,3 +108,33 @@ expression E1, E2;
@@
- hashcpy(E1.hash, E2->hash)
+ oidcpy(&E1, E2)
+
+@@
+expression E1, E2;
+@@
+- oidcmp(E1, E2) == 0
++ oideq(E1, E2)
+
+@@
+identifier f != hasheq;
+expression E1, E2;
+@@
+ f(...) {<...
+- hashcmp(E1, E2) == 0
++ hasheq(E1, E2)
+ ...>}
+
+@@
+expression E1, E2;
+@@
+- oidcmp(E1, E2) != 0
++ !oideq(E1, E2)
+
+@@
+identifier f != hasheq;
+expression E1, E2;
+@@
+ f(...) {<...
+- hashcmp(E1, E2) != 0
++ !hasheq(E1, E2)
+ ...>}
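
Two things happen in these rules: the `{<... ...>}` form lets a pattern match anywhere in a function body instead of requiring it on every control-flow path, and the new rules rewrite equality tests into the dedicated predicates (Coccinelle's standard isomorphisms let the `== 0` pattern also cover the `!oidcmp(...)` spelling that dominates the rest of this patch). Schematically, in terms of git's own helpers from cache.h:

    #include "cache.h"

    /* old spellings */
    static int same_oid_old(const struct object_id *a, const struct object_id *b)
    {
        return !oidcmp(a, b);
    }
    static int differs_old(const unsigned char *a, const unsigned char *b)
    {
        return hashcmp(a, b) != 0;
    }

    /* what the semantic patch produces */
    static int same_oid_new(const struct object_id *a, const struct object_id *b)
    {
        return oideq(a, b);
    }
    static int differs_new(const unsigned char *a, const unsigned char *b)
    {
        return !hasheq(a, b);
    }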
diff --git a/convert.c b/convert.c
index 6057f1f580..e0848226d2 100644
--- a/convert.c
+++ b/convert.c
@@ -1297,6 +1297,7 @@ static void convert_attrs(const struct index_state *istate,
struct conv_attrs *ca, const char *path)
{
static struct attr_check *check;
+ struct attr_check_item *ccheck = NULL;
if (!check) {
check = attr_check_initl("crlf", "ident", "filter",
@@ -1306,30 +1307,25 @@ static void convert_attrs(const struct index_state *istate,
git_config(read_convert_config, NULL);
}
- if (!git_check_attr(istate, path, check)) {
- struct attr_check_item *ccheck = check->items;
- ca->crlf_action = git_path_check_crlf(ccheck + 4);
- if (ca->crlf_action == CRLF_UNDEFINED)
- ca->crlf_action = git_path_check_crlf(ccheck + 0);
- ca->ident = git_path_check_ident(ccheck + 1);
- ca->drv = git_path_check_convert(ccheck + 2);
- if (ca->crlf_action != CRLF_BINARY) {
- enum eol eol_attr = git_path_check_eol(ccheck + 3);
- if (ca->crlf_action == CRLF_AUTO && eol_attr == EOL_LF)
- ca->crlf_action = CRLF_AUTO_INPUT;
- else if (ca->crlf_action == CRLF_AUTO && eol_attr == EOL_CRLF)
- ca->crlf_action = CRLF_AUTO_CRLF;
- else if (eol_attr == EOL_LF)
- ca->crlf_action = CRLF_TEXT_INPUT;
- else if (eol_attr == EOL_CRLF)
- ca->crlf_action = CRLF_TEXT_CRLF;
- }
- ca->working_tree_encoding = git_path_check_encoding(ccheck + 5);
- } else {
- ca->drv = NULL;
- ca->crlf_action = CRLF_UNDEFINED;
- ca->ident = 0;
+ git_check_attr(istate, path, check);
+ ccheck = check->items;
+ ca->crlf_action = git_path_check_crlf(ccheck + 4);
+ if (ca->crlf_action == CRLF_UNDEFINED)
+ ca->crlf_action = git_path_check_crlf(ccheck + 0);
+ ca->ident = git_path_check_ident(ccheck + 1);
+ ca->drv = git_path_check_convert(ccheck + 2);
+ if (ca->crlf_action != CRLF_BINARY) {
+ enum eol eol_attr = git_path_check_eol(ccheck + 3);
+ if (ca->crlf_action == CRLF_AUTO && eol_attr == EOL_LF)
+ ca->crlf_action = CRLF_AUTO_INPUT;
+ else if (ca->crlf_action == CRLF_AUTO && eol_attr == EOL_CRLF)
+ ca->crlf_action = CRLF_AUTO_CRLF;
+ else if (eol_attr == EOL_LF)
+ ca->crlf_action = CRLF_TEXT_INPUT;
+ else if (eol_attr == EOL_CRLF)
+ ca->crlf_action = CRLF_TEXT_CRLF;
}
+ ca->working_tree_encoding = git_path_check_encoding(ccheck + 5);
/* Save attr and make a decision for action */
ca->attr_action = ca->crlf_action;
diff --git a/delta-islands.c b/delta-islands.c
new file mode 100644
index 0000000000..8e5018e406
--- /dev/null
+++ b/delta-islands.c
@@ -0,0 +1,502 @@
+#include "cache.h"
+#include "attr.h"
+#include "object.h"
+#include "blob.h"
+#include "commit.h"
+#include "tag.h"
+#include "tree.h"
+#include "delta.h"
+#include "pack.h"
+#include "tree-walk.h"
+#include "diff.h"
+#include "revision.h"
+#include "list-objects.h"
+#include "progress.h"
+#include "refs.h"
+#include "khash.h"
+#include "pack-bitmap.h"
+#include "pack-objects.h"
+#include "delta-islands.h"
+#include "sha1-array.h"
+#include "config.h"
+
+KHASH_INIT(str, const char *, void *, 1, kh_str_hash_func, kh_str_hash_equal)
+
+static khash_sha1 *island_marks;
+static unsigned island_counter;
+static unsigned island_counter_core;
+
+static kh_str_t *remote_islands;
+
+struct remote_island {
+ uint64_t hash;
+ struct oid_array oids;
+};
+
+struct island_bitmap {
+ uint32_t refcount;
+ uint32_t bits[FLEX_ARRAY];
+};
+
+static uint32_t island_bitmap_size;
+
+/*
+ * Allocate a new bitmap; if "old" is not NULL, the new bitmap will be a copy
+ * of "old". Otherwise, the new bitmap is empty.
+ */
+static struct island_bitmap *island_bitmap_new(const struct island_bitmap *old)
+{
+ size_t size = sizeof(struct island_bitmap) + (island_bitmap_size * 4);
+ struct island_bitmap *b = xcalloc(1, size);
+
+ if (old)
+ memcpy(b, old, size);
+
+ b->refcount = 1;
+ return b;
+}
+
+static void island_bitmap_or(struct island_bitmap *a, const struct island_bitmap *b)
+{
+ uint32_t i;
+
+ for (i = 0; i < island_bitmap_size; ++i)
+ a->bits[i] |= b->bits[i];
+}
+
+static int island_bitmap_is_subset(struct island_bitmap *self,
+ struct island_bitmap *super)
+{
+ uint32_t i;
+
+ if (self == super)
+ return 1;
+
+ for (i = 0; i < island_bitmap_size; ++i) {
+ if ((self->bits[i] & super->bits[i]) != self->bits[i])
+ return 0;
+ }
+
+ return 1;
+}
+
+#define ISLAND_BITMAP_BLOCK(x) (x / 32)
+#define ISLAND_BITMAP_MASK(x) (1 << (x % 32))
+
+static void island_bitmap_set(struct island_bitmap *self, uint32_t i)
+{
+ self->bits[ISLAND_BITMAP_BLOCK(i)] |= ISLAND_BITMAP_MASK(i);
+}
+
+static int island_bitmap_get(struct island_bitmap *self, uint32_t i)
+{
+ return (self->bits[ISLAND_BITMAP_BLOCK(i)] & ISLAND_BITMAP_MASK(i)) != 0;
+}
+
+int in_same_island(const struct object_id *trg_oid, const struct object_id *src_oid)
+{
+ khiter_t trg_pos, src_pos;
+
+ /* If we aren't using islands, assume everything goes together. */
+ if (!island_marks)
+ return 1;
+
+ /*
+ * If we don't have a bitmap for the target, we can delta it
+ * against anything -- it's not an important object
+ */
+ trg_pos = kh_get_sha1(island_marks, trg_oid->hash);
+ if (trg_pos >= kh_end(island_marks))
+ return 1;
+
+ /*
+ * if the source (our delta base) doesn't have a bitmap,
+ * we don't want to base any deltas on it!
+ */
+ src_pos = kh_get_sha1(island_marks, src_oid->hash);
+ if (src_pos >= kh_end(island_marks))
+ return 0;
+
+ return island_bitmap_is_subset(kh_value(island_marks, trg_pos),
+ kh_value(island_marks, src_pos));
+}
+
+int island_delta_cmp(const struct object_id *a, const struct object_id *b)
+{
+ khiter_t a_pos, b_pos;
+ struct island_bitmap *a_bitmap = NULL, *b_bitmap = NULL;
+
+ if (!island_marks)
+ return 0;
+
+ a_pos = kh_get_sha1(island_marks, a->hash);
+ if (a_pos < kh_end(island_marks))
+ a_bitmap = kh_value(island_marks, a_pos);
+
+ b_pos = kh_get_sha1(island_marks, b->hash);
+ if (b_pos < kh_end(island_marks))
+ b_bitmap = kh_value(island_marks, b_pos);
+
+ if (a_bitmap) {
+ if (!b_bitmap || !island_bitmap_is_subset(a_bitmap, b_bitmap))
+ return -1;
+ }
+ if (b_bitmap) {
+ if (!a_bitmap || !island_bitmap_is_subset(b_bitmap, a_bitmap))
+ return 1;
+ }
+
+ return 0;
+}
+
+static struct island_bitmap *create_or_get_island_marks(struct object *obj)
+{
+ khiter_t pos;
+ int hash_ret;
+
+ pos = kh_put_sha1(island_marks, obj->oid.hash, &hash_ret);
+ if (hash_ret)
+ kh_value(island_marks, pos) = island_bitmap_new(NULL);
+
+ return kh_value(island_marks, pos);
+}
+
+static void set_island_marks(struct object *obj, struct island_bitmap *marks)
+{
+ struct island_bitmap *b;
+ khiter_t pos;
+ int hash_ret;
+
+ pos = kh_put_sha1(island_marks, obj->oid.hash, &hash_ret);
+ if (hash_ret) {
+ /*
+ * We don't have one yet; make a copy-on-write of the
+ * parent.
+ */
+ marks->refcount++;
+ kh_value(island_marks, pos) = marks;
+ return;
+ }
+
+ /*
+ * We do have it. Make sure we split any copy-on-write before
+ * updating.
+ */
+ b = kh_value(island_marks, pos);
+ if (b->refcount > 1) {
+ b->refcount--;
+ b = kh_value(island_marks, pos) = island_bitmap_new(b);
+ }
+ island_bitmap_or(b, marks);
+}
+
+static void mark_remote_island_1(struct remote_island *rl, int is_core_island)
+{
+ uint32_t i;
+
+ for (i = 0; i < rl->oids.nr; ++i) {
+ struct island_bitmap *marks;
+ struct object *obj = parse_object(the_repository, &rl->oids.oid[i]);
+
+ if (!obj)
+ continue;
+
+ marks = create_or_get_island_marks(obj);
+ island_bitmap_set(marks, island_counter);
+
+ if (is_core_island && obj->type == OBJ_COMMIT)
+ obj->flags |= NEEDS_BITMAP;
+
+ /* If it was a tag, also make sure we hit the underlying object. */
+ while (obj && obj->type == OBJ_TAG) {
+ obj = ((struct tag *)obj)->tagged;
+ if (obj) {
+ parse_object(the_repository, &obj->oid);
+ marks = create_or_get_island_marks(obj);
+ island_bitmap_set(marks, island_counter);
+ }
+ }
+ }
+
+ if (is_core_island)
+ island_counter_core = island_counter;
+
+ island_counter++;
+}
+
+struct tree_islands_todo {
+ struct object_entry *entry;
+ unsigned int depth;
+};
+
+static int tree_depth_compare(const void *a, const void *b)
+{
+ const struct tree_islands_todo *todo_a = a;
+ const struct tree_islands_todo *todo_b = b;
+
+ return todo_a->depth - todo_b->depth;
+}
+
+void resolve_tree_islands(int progress, struct packing_data *to_pack)
+{
+ struct progress *progress_state = NULL;
+ struct tree_islands_todo *todo;
+ int nr = 0;
+ int i;
+
+ if (!island_marks)
+ return;
+
+ /*
+ * We process only trees, as commits and tags have already been handled
+ * (and passed their marks on to root trees, as well). We must make sure
+ * to process them in descending tree-depth order so that marks
+ * propagate down the tree properly, even if a sub-tree is found in
+ * multiple parent trees.
+ */
+ ALLOC_ARRAY(todo, to_pack->nr_objects);
+ for (i = 0; i < to_pack->nr_objects; i++) {
+ if (oe_type(&to_pack->objects[i]) == OBJ_TREE) {
+ todo[nr].entry = &to_pack->objects[i];
+ todo[nr].depth = oe_tree_depth(to_pack, &to_pack->objects[i]);
+ nr++;
+ }
+ }
+ QSORT(todo, nr, tree_depth_compare);
+
+ if (progress)
+ progress_state = start_progress(_("Propagating island marks"), nr);
+
+ for (i = 0; i < nr; i++) {
+ struct object_entry *ent = todo[i].entry;
+ struct island_bitmap *root_marks;
+ struct tree *tree;
+ struct tree_desc desc;
+ struct name_entry entry;
+ khiter_t pos;
+
+ pos = kh_get_sha1(island_marks, ent->idx.oid.hash);
+ if (pos >= kh_end(island_marks))
+ continue;
+
+ root_marks = kh_value(island_marks, pos);
+
+ tree = lookup_tree(the_repository, &ent->idx.oid);
+ if (!tree || parse_tree(tree) < 0)
+ die(_("bad tree object %s"), oid_to_hex(&ent->idx.oid));
+
+ init_tree_desc(&desc, tree->buffer, tree->size);
+ while (tree_entry(&desc, &entry)) {
+ struct object *obj;
+
+ if (S_ISGITLINK(entry.mode))
+ continue;
+
+ obj = lookup_object(the_repository, entry.oid->hash);
+ if (!obj)
+ continue;
+
+ set_island_marks(obj, root_marks);
+ }
+
+ free_tree_buffer(tree);
+
+ display_progress(progress_state, i+1);
+ }
+
+ stop_progress(&progress_state);
+ free(todo);
+}
+
+static regex_t *island_regexes;
+static unsigned int island_regexes_alloc, island_regexes_nr;
+static const char *core_island_name;
+
+static int island_config_callback(const char *k, const char *v, void *cb)
+{
+ if (!strcmp(k, "pack.island")) {
+ struct strbuf re = STRBUF_INIT;
+
+ if (!v)
+ return config_error_nonbool(k);
+
+ ALLOC_GROW(island_regexes, island_regexes_nr + 1, island_regexes_alloc);
+
+ if (*v != '^')
+ strbuf_addch(&re, '^');
+ strbuf_addstr(&re, v);
+
+ if (regcomp(&island_regexes[island_regexes_nr], re.buf, REG_EXTENDED))
+ die(_("failed to load island regex for '%s': %s"), k, re.buf);
+
+ strbuf_release(&re);
+ island_regexes_nr++;
+ return 0;
+ }
+
+ if (!strcmp(k, "pack.islandcore"))
+ return git_config_string(&core_island_name, k, v);
+
+ return 0;
+}
+
+static void add_ref_to_island(const char *island_name, const struct object_id *oid)
+{
+ uint64_t sha_core;
+ struct remote_island *rl = NULL;
+
+ int hash_ret;
+ khiter_t pos = kh_put_str(remote_islands, island_name, &hash_ret);
+
+ if (hash_ret) {
+ kh_key(remote_islands, pos) = xstrdup(island_name);
+ kh_value(remote_islands, pos) = xcalloc(1, sizeof(struct remote_island));
+ }
+
+ rl = kh_value(remote_islands, pos);
+ oid_array_append(&rl->oids, oid);
+
+ memcpy(&sha_core, oid->hash, sizeof(uint64_t));
+ rl->hash += sha_core;
+}
+
+static int find_island_for_ref(const char *refname, const struct object_id *oid,
+ int flags, void *data)
+{
+ /*
+ * We should advertise 'ARRAY_SIZE(matches) - 2' as the max,
+ * so we can diagnose below a config with more capture groups
+ * than we support.
+ */
+ regmatch_t matches[16];
+ int i, m;
+ struct strbuf island_name = STRBUF_INIT;
+
+ /* walk backwards to get last-one-wins ordering */
+ for (i = island_regexes_nr - 1; i >= 0; i--) {
+ if (!regexec(&island_regexes[i], refname,
+ ARRAY_SIZE(matches), matches, 0))
+ break;
+ }
+
+ if (i < 0)
+ return 0;
+
+ if (matches[ARRAY_SIZE(matches) - 1].rm_so != -1)
+ warning(_("island regex from config has "
+ "too many capture groups (max=%d)"),
+ (int)ARRAY_SIZE(matches) - 2);
+
+ for (m = 1; m < ARRAY_SIZE(matches); m++) {
+ regmatch_t *match = &matches[m];
+
+ if (match->rm_so == -1)
+ continue;
+
+ if (island_name.len)
+ strbuf_addch(&island_name, '-');
+
+ strbuf_add(&island_name, refname + match->rm_so, match->rm_eo - match->rm_so);
+ }
+
+ add_ref_to_island(island_name.buf, oid);
+ strbuf_release(&island_name);
+ return 0;
+}
+
+static struct remote_island *get_core_island(void)
+{
+ if (core_island_name) {
+ khiter_t pos = kh_get_str(remote_islands, core_island_name);
+ if (pos < kh_end(remote_islands))
+ return kh_value(remote_islands, pos);
+ }
+
+ return NULL;
+}
+
+static void deduplicate_islands(void)
+{
+ struct remote_island *island, *core = NULL, **list;
+ unsigned int island_count, dst, src, ref, i = 0;
+
+ island_count = kh_size(remote_islands);
+ ALLOC_ARRAY(list, island_count);
+
+ kh_foreach_value(remote_islands, island, {
+ list[i++] = island;
+ });
+
+ for (ref = 0; ref + 1 < island_count; ref++) {
+ for (src = ref + 1, dst = src; src < island_count; src++) {
+ if (list[ref]->hash == list[src]->hash)
+ continue;
+
+ if (src != dst)
+ list[dst] = list[src];
+
+ dst++;
+ }
+ island_count = dst;
+ }
+
+ island_bitmap_size = (island_count / 32) + 1;
+ core = get_core_island();
+
+ for (i = 0; i < island_count; ++i) {
+ mark_remote_island_1(list[i], core && list[i]->hash == core->hash);
+ }
+
+ free(list);
+}
+
+void load_delta_islands(void)
+{
+ island_marks = kh_init_sha1();
+ remote_islands = kh_init_str();
+
+ git_config(island_config_callback, NULL);
+ for_each_ref(find_island_for_ref, NULL);
+ deduplicate_islands();
+
+ fprintf(stderr, _("Marked %d islands, done.\n"), island_counter);
+}
+
+void propagate_island_marks(struct commit *commit)
+{
+ khiter_t pos = kh_get_sha1(island_marks, commit->object.oid.hash);
+
+ if (pos < kh_end(island_marks)) {
+ struct commit_list *p;
+ struct island_bitmap *root_marks = kh_value(island_marks, pos);
+
+ parse_commit(commit);
+ set_island_marks(&get_commit_tree(commit)->object, root_marks);
+ for (p = commit->parents; p; p = p->next)
+ set_island_marks(&p->item->object, root_marks);
+ }
+}
+
+int compute_pack_layers(struct packing_data *to_pack)
+{
+ uint32_t i;
+
+ if (!core_island_name || !island_marks)
+ return 1;
+
+ for (i = 0; i < to_pack->nr_objects; ++i) {
+ struct object_entry *entry = &to_pack->objects[i];
+ khiter_t pos = kh_get_sha1(island_marks, entry->idx.oid.hash);
+
+ oe_set_layer(to_pack, entry, 1);
+
+ if (pos < kh_end(island_marks)) {
+ struct island_bitmap *bitmap = kh_value(island_marks, pos);
+
+ if (island_bitmap_get(bitmap, island_counter_core))
+ oe_set_layer(to_pack, entry, 0);
+ }
+ }
+
+ return 2;
+}
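
Island membership is tracked per object as a bit set, one bit per configured island. in_same_island() above accepts a delta base only when the base is present in every island the target belongs to, i.e. when the target's bits are a subset of the base's. A standalone sketch of that test on single 32-bit words:

    #include <stdio.h>
    #include <stdint.h>

    /* Same test as island_bitmap_is_subset(), reduced to one word. */
    static int bitmap_is_subset(uint32_t self, uint32_t super)
    {
        return (self & super) == self;
    }

    int main(void)
    {
        uint32_t target = 0x5;  /* islands 0 and 2 */
        uint32_t base_a = 0x7;  /* islands 0, 1, 2 -> acceptable base */
        uint32_t base_b = 0x4;  /* island 2 only   -> rejected        */

        printf("%d %d\n", bitmap_is_subset(target, base_a),
                          bitmap_is_subset(target, base_b));  /* prints "1 0" */
        return 0;
    }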
diff --git a/delta-islands.h b/delta-islands.h
new file mode 100644
index 0000000000..f9725730f4
--- /dev/null
+++ b/delta-islands.h
@@ -0,0 +1,11 @@
+#ifndef DELTA_ISLANDS_H
+#define DELTA_ISLANDS_H
+
+int island_delta_cmp(const struct object_id *a, const struct object_id *b);
+int in_same_island(const struct object_id *, const struct object_id *);
+void resolve_tree_islands(int progress, struct packing_data *to_pack);
+void load_delta_islands(void);
+void propagate_island_marks(struct commit *commit);
+int compute_pack_layers(struct packing_data *to_pack);
+
+#endif /* DELTA_ISLANDS_H */
diff --git a/diff-lib.c b/diff-lib.c
index 88a98b1c06..30bf9a2399 100644
--- a/diff-lib.c
+++ b/diff-lib.c
@@ -342,7 +342,7 @@ static int show_modified(struct rev_info *revs,
}
if (revs->combine_merges && !cached &&
- (oidcmp(oid, &old_entry->oid) || oidcmp(&old_entry->oid, &new_entry->oid))) {
+ (!oideq(oid, &old_entry->oid) || !oideq(&old_entry->oid, &new_entry->oid))) {
struct combine_diff_path *p;
int pathlen = ce_namelen(new_entry);
@@ -366,7 +366,7 @@ static int show_modified(struct rev_info *revs,
}
oldmode = old_entry->ce_mode;
- if (mode == oldmode && !oidcmp(oid, &old_entry->oid) && !dirty_submodule &&
+ if (mode == oldmode && oideq(oid, &old_entry->oid) && !dirty_submodule &&
!revs->diffopt.flags.find_copies_harder)
return 0;
@@ -518,11 +518,11 @@ static int diff_cache(struct rev_info *revs,
int run_diff_index(struct rev_info *revs, int cached)
{
struct object_array_entry *ent;
- uint64_t start = getnanotime();
if (revs->pending.nr != 1)
BUG("run_diff_index must be passed exactly one tree");
+ trace_performance_enter();
ent = revs->pending.objects;
if (diff_cache(revs, &ent->item->oid, ent->name, cached))
exit(128);
@@ -531,7 +531,7 @@ int run_diff_index(struct rev_info *revs, int cached)
diffcore_fix_diff_index(&revs->diffopt);
diffcore_std(&revs->diffopt);
diff_flush(&revs->diffopt);
- trace_performance_since(start, "diff-index");
+ trace_performance_leave("diff-index");
return 0;
}
diff --git a/diff.c b/diff.c
index 145cfbae59..f0c7557b40 100644
--- a/diff.c
+++ b/diff.c
@@ -624,42 +624,54 @@ static void check_blank_at_eof(mmfile_t *mf1, mmfile_t *mf2,
}
static void emit_line_0(struct diff_options *o,
- const char *set, unsigned reverse, const char *reset,
+ const char *set_sign, const char *set, unsigned reverse, const char *reset,
int first, const char *line, int len)
{
int has_trailing_newline, has_trailing_carriage_return;
- int nofirst;
+ int needs_reset = 0; /* at the end of the line */
FILE *file = o->file;
- if (first)
- fputs(diff_line_prefix(o), file);
- else if (!len)
- return;
+ fputs(diff_line_prefix(o), file);
- if (len == 0) {
- has_trailing_newline = (first == '\n');
- has_trailing_carriage_return = (!has_trailing_newline &&
- (first == '\r'));
- nofirst = has_trailing_newline || has_trailing_carriage_return;
- } else {
- has_trailing_newline = (len > 0 && line[len-1] == '\n');
- if (has_trailing_newline)
- len--;
- has_trailing_carriage_return = (len > 0 && line[len-1] == '\r');
- if (has_trailing_carriage_return)
- len--;
- nofirst = 0;
+ has_trailing_newline = (len > 0 && line[len-1] == '\n');
+ if (has_trailing_newline)
+ len--;
+
+ has_trailing_carriage_return = (len > 0 && line[len-1] == '\r');
+ if (has_trailing_carriage_return)
+ len--;
+
+ if (!len && !first)
+ goto end_of_line;
+
+ if (reverse && want_color(o->use_color)) {
+ fputs(GIT_COLOR_REVERSE, file);
+ needs_reset = 1;
}
- if (len || !nofirst) {
- if (reverse && want_color(o->use_color))
- fputs(GIT_COLOR_REVERSE, file);
+ if (set_sign) {
+ fputs(set_sign, file);
+ needs_reset = 1;
+ }
+
+ if (first)
+ fputc(first, file);
+
+ if (!len)
+ goto end_of_line;
+
+ if (set) {
+ if (set_sign && set != set_sign)
+ fputs(reset, file);
fputs(set, file);
- if (first && !nofirst)
- fputc(first, file);
- fwrite(line, len, 1, file);
- fputs(reset, file);
+ needs_reset = 1;
}
+ fwrite(line, len, 1, file);
+ needs_reset = 1; /* 'line' may contain color codes. */
+
+end_of_line:
+ if (needs_reset)
+ fputs(reset, file);
if (has_trailing_carriage_return)
fputc('\r', file);
if (has_trailing_newline)
@@ -669,7 +681,7 @@ static void emit_line_0(struct diff_options *o,
static void emit_line(struct diff_options *o, const char *set, const char *reset,
const char *line, int len)
{
- emit_line_0(o, set, 0, reset, line[0], line+1, len-1);
+ emit_line_0(o, set, NULL, 0, reset, 0, line, len);
}
enum diff_symbol {
@@ -968,8 +980,13 @@ static void pmb_advance_or_null_multi_match(struct diff_options *o,
/* Carry the white space delta forward */
pmb[i]->next_line->wsd = pmb[i]->wsd;
pmb[i] = pmb[i]->next_line;
- } else
+ } else {
+ if (pmb[i]->wsd) {
+ free(pmb[i]->wsd->string);
+ FREE_AND_NULL(pmb[i]->wsd);
+ }
pmb[i] = NULL;
+ }
}
}
@@ -990,10 +1007,6 @@ static int shrink_potential_moved_blocks(struct moved_entry **pmb,
if (lp < pmb_nr && rp > -1 && lp < rp) {
pmb[lp] = pmb[rp];
- if (pmb[rp]->wsd) {
- free(pmb[rp]->wsd->string);
- FREE_AND_NULL(pmb[rp]->wsd);
- }
pmb[rp] = NULL;
rp--;
lp++;
@@ -1187,9 +1200,9 @@ static void dim_moved_lines(struct diff_options *o)
}
static void emit_line_ws_markup(struct diff_options *o,
- const char *set, const char *reset,
- const char *line, int len,
- const char *set_sign, char sign,
+ const char *set_sign, const char *set,
+ const char *reset,
+ char sign, const char *line, int len,
unsigned ws_rule, int blank_at_eof)
{
const char *ws = NULL;
@@ -1201,18 +1214,15 @@ static void emit_line_ws_markup(struct diff_options *o,
}
if (!ws && !set_sign)
- emit_line_0(o, set, 0, reset, sign, line, len);
+ emit_line_0(o, set, NULL, 0, reset, sign, line, len);
else if (!ws) {
- /* Emit just the prefix, then the rest. */
- emit_line_0(o, set_sign ? set_sign : set, !!set_sign, reset,
- sign, "", 0);
- emit_line_0(o, set, 0, reset, 0, line, len);
+ emit_line_0(o, set_sign, set, !!set_sign, reset, sign, line, len);
} else if (blank_at_eof)
/* Blank line at EOF - paint '+' as well */
- emit_line_0(o, ws, 0, reset, sign, line, len);
+ emit_line_0(o, ws, NULL, 0, reset, sign, line, len);
else {
/* Emit just the prefix, then the rest. */
- emit_line_0(o, set_sign ? set_sign : set, !!set_sign, reset,
+ emit_line_0(o, set_sign ? set_sign : set, NULL, !!set_sign, reset,
sign, "", 0);
ws_check_emit(line, len, ws_rule,
o->file, set, reset, ws);
@@ -1236,7 +1246,7 @@ static void emit_diff_symbol_from_struct(struct diff_options *o,
context = diff_get_color_opt(o, DIFF_CONTEXT);
reset = diff_get_color_opt(o, DIFF_RESET);
putc('\n', o->file);
- emit_line_0(o, context, 0, reset, '\\',
+ emit_line_0(o, context, NULL, 0, reset, '\\',
nneof, strlen(nneof));
break;
case DIFF_SYMBOL_SUBMODULE_HEADER:
@@ -1274,7 +1284,9 @@ static void emit_diff_symbol_from_struct(struct diff_options *o,
else if (c == '-')
set = diff_get_color_opt(o, DIFF_FILE_OLD);
}
- emit_line_ws_markup(o, set, reset, line, len, set_sign, ' ',
+ emit_line_ws_markup(o, set_sign, set, reset,
+ o->output_indicators[OUTPUT_INDICATOR_CONTEXT],
+ line, len,
flags & (DIFF_SYMBOL_CONTENT_WS_MASK), 0);
break;
case DIFF_SYMBOL_PLUS:
@@ -1317,7 +1329,9 @@ static void emit_diff_symbol_from_struct(struct diff_options *o,
set = diff_get_color_opt(o, DIFF_CONTEXT_BOLD);
flags &= ~DIFF_SYMBOL_CONTENT_WS_MASK;
}
- emit_line_ws_markup(o, set, reset, line, len, set_sign, '+',
+ emit_line_ws_markup(o, set_sign, set, reset,
+ o->output_indicators[OUTPUT_INDICATOR_NEW],
+ line, len,
flags & DIFF_SYMBOL_CONTENT_WS_MASK,
flags & DIFF_SYMBOL_CONTENT_BLANK_LINE_EOF);
break;
@@ -1360,7 +1374,9 @@ static void emit_diff_symbol_from_struct(struct diff_options *o,
else
set = diff_get_color_opt(o, DIFF_CONTEXT_DIM);
}
- emit_line_ws_markup(o, set, reset, line, len, set_sign, '-',
+ emit_line_ws_markup(o, set_sign, set, reset,
+ o->output_indicators[OUTPUT_INDICATOR_OLD],
+ line, len,
flags & DIFF_SYMBOL_CONTENT_WS_MASK, 0);
break;
case DIFF_SYMBOL_WORDS_PORCELAIN:
@@ -2933,16 +2949,11 @@ static void show_dirstat(struct diff_options *options)
struct diff_filepair *p = q->queue[i];
const char *name;
unsigned long copied, added, damage;
- int content_changed;
name = p->two->path ? p->two->path : p->one->path;
- if (p->one->oid_valid && p->two->oid_valid)
- content_changed = oidcmp(&p->one->oid, &p->two->oid);
- else
- content_changed = 1;
-
- if (!content_changed) {
+ if (p->one->oid_valid && p->two->oid_valid &&
+ oideq(&p->one->oid, &p->two->oid)) {
/*
* The SHA1 has not changed, so pre-/post-content is
* identical. We can therefore skip looking at the
@@ -2989,7 +3000,7 @@ static void show_dirstat(struct diff_options *options)
* made to the preimage.
* If the resulting damage is zero, we know that
* diffcore_count_changes() considers the two entries to
- * be identical, but since content_changed is true, we
+ * be identical, but since the oid changed, we
* know that there must have been _some_ kind of change,
* so we force all entries to have damage > 0.
*/
@@ -3404,7 +3415,7 @@ static void builtin_diff(const char *name_a,
if (!one->data && !two->data &&
S_ISREG(one->mode) && S_ISREG(two->mode) &&
!o->flags.binary) {
- if (!oidcmp(&one->oid, &two->oid)) {
+ if (oideq(&one->oid, &two->oid)) {
if (must_show_header)
emit_diff_symbol(o, DIFF_SYMBOL_HEADER,
header.buf, header.len,
@@ -3569,7 +3580,7 @@ static void builtin_diffstat(const char *name_a, const char *name_b,
return;
}
- same_contents = !oidcmp(&one->oid, &two->oid);
+ same_contents = oideq(&one->oid, &two->oid);
if (diff_filespec_is_binary(one) || diff_filespec_is_binary(two)) {
data->is_binary = 1;
@@ -3765,7 +3776,7 @@ static int reuse_worktree_file(const char *name, const struct object_id *oid, in
* This is not the sha1 we are looking for, or
* unreusable because it is not a regular file.
*/
- if (oidcmp(oid, &ce->oid) || !S_ISREG(ce->ce_mode))
+ if (!oideq(oid, &ce->oid) || !S_ISREG(ce->ce_mode))
return 0;
/*
@@ -4170,7 +4181,7 @@ static void fill_metainfo(struct strbuf *msg,
default:
*must_show_header = 0;
}
- if (one && two && oidcmp(&one->oid, &two->oid)) {
+ if (one && two && !oideq(&one->oid, &two->oid)) {
const unsigned hexsz = the_hash_algo->hexsz;
int abbrev = o->flags.full_index ? hexsz : DEFAULT_ABBREV;
@@ -4375,6 +4386,9 @@ void diff_setup(struct diff_options *options)
options->file = stdout;
+ options->output_indicators[OUTPUT_INDICATOR_NEW] = '+';
+ options->output_indicators[OUTPUT_INDICATOR_OLD] = '-';
+ options->output_indicators[OUTPUT_INDICATOR_CONTEXT] = ' ';
options->abbrev = DEFAULT_ABBREV;
options->line_termination = '\n';
options->break_opt = -1;
@@ -4852,6 +4866,12 @@ int diff_opt_parse(struct diff_options *options,
options->output_format |= DIFF_FORMAT_DIFFSTAT;
} else if (!strcmp(arg, "--no-compact-summary"))
options->flags.stat_with_summary = 0;
+ else if (skip_prefix(arg, "--output-indicator-new=", &arg))
+ options->output_indicators[OUTPUT_INDICATOR_NEW] = arg[0];
+ else if (skip_prefix(arg, "--output-indicator-old=", &arg))
+ options->output_indicators[OUTPUT_INDICATOR_OLD] = arg[0];
+ else if (skip_prefix(arg, "--output-indicator-context=", &arg))
+ options->output_indicators[OUTPUT_INDICATOR_CONTEXT] = arg[0];
/* renames options */
else if (starts_with(arg, "-B") ||
@@ -5323,7 +5343,7 @@ int diff_unmodified_pair(struct diff_filepair *p)
* dealing with a change.
*/
if (one->oid_valid && two->oid_valid &&
- !oidcmp(&one->oid, &two->oid) &&
+ oideq(&one->oid, &two->oid) &&
!one->dirty_submodule && !two->dirty_submodule)
return 1; /* no change */
if (!one->oid_valid && !two->oid_valid)
@@ -5457,7 +5477,7 @@ static void diff_resolve_rename_copy(void)
else
p->status = DIFF_STATUS_RENAMED;
}
- else if (oidcmp(&p->one->oid, &p->two->oid) ||
+ else if (!oideq(&p->one->oid, &p->two->oid) ||
p->one->mode != p->two->mode ||
p->one->dirty_submodule ||
p->two->dirty_submodule ||
diff --git a/diff.h b/diff.h
index 89544e64bc..a30cc35ec3 100644
--- a/diff.h
+++ b/diff.h
@@ -194,6 +194,11 @@ struct diff_options {
FILE *file;
int close_file;
+#define OUTPUT_INDICATOR_NEW 0
+#define OUTPUT_INDICATOR_OLD 1
+#define OUTPUT_INDICATOR_CONTEXT 2
+ char output_indicators[3];
+
struct pathspec pathspec;
pathchange_fn_t pathchange;
change_fn_t change;
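
The three sign characters that used to be hard-coded ('+', '-', ' ') now live in diff_options, and the diff.c hunk above parses matching --output-indicator-new, --output-indicator-old and --output-indicator-context options. A hedged in-tree sketch of overriding them programmatically (the function name is invented):

    #include "cache.h"
    #include "diff.h"

    /* Hypothetical: emit '>' / '<' instead of '+' / '-'. */
    static void use_angle_indicators(struct diff_options *opts)
    {
        opts->output_indicators[OUTPUT_INDICATOR_NEW] = '>';
        opts->output_indicators[OUTPUT_INDICATOR_OLD] = '<';
        opts->output_indicators[OUTPUT_INDICATOR_CONTEXT] = ' ';
    }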
diff --git a/diffcore-break.c b/diffcore-break.c
index c64359f489..e11fcfdb39 100644
--- a/diffcore-break.c
+++ b/diffcore-break.c
@@ -58,7 +58,7 @@ static int should_break(struct diff_filespec *src,
}
if (src->oid_valid && dst->oid_valid &&
- !oidcmp(&src->oid, &dst->oid))
+ oideq(&src->oid, &dst->oid))
return 0; /* they are the same */
if (diff_populate_filespec(src, 0) || diff_populate_filespec(dst, 0))
diff --git a/diffcore-rename.c b/diffcore-rename.c
index d775183c2f..daddd9b28a 100644
--- a/diffcore-rename.c
+++ b/diffcore-rename.c
@@ -286,7 +286,7 @@ static int find_identical_files(struct hashmap *srcs,
struct diff_filespec *source = p->filespec;
/* False hash collision? */
- if (oidcmp(&source->oid, &target->oid))
+ if (!oideq(&source->oid, &target->oid))
continue;
/* Non-regular files? If so, the modes must match! */
if (!S_ISREG(source->mode) || !S_ISREG(target->mode)) {
diff --git a/dir.c b/dir.c
index aceb0d4869..47c2fca8dc 100644
--- a/dir.c
+++ b/dir.c
@@ -1282,7 +1282,7 @@ static void prep_exclude(struct dir_struct *dir,
* order, though, if you do that.
*/
if (untracked &&
- oidcmp(&oid_stat.oid, &untracked->exclude_oid)) {
+ !oideq(&oid_stat.oid, &untracked->exclude_oid)) {
invalidate_gitignore(dir->untracked, untracked);
oidcpy(&untracked->exclude_oid, &oid_stat.oid);
}
@@ -2248,12 +2248,12 @@ static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *d
/* Validate $GIT_DIR/info/exclude and core.excludesfile */
root = dir->untracked->root;
- if (oidcmp(&dir->ss_info_exclude.oid,
+ if (!oideq(&dir->ss_info_exclude.oid,
&dir->untracked->ss_info_exclude.oid)) {
invalidate_gitignore(dir->untracked, root);
dir->untracked->ss_info_exclude = dir->ss_info_exclude;
}
- if (oidcmp(&dir->ss_excludes_file.oid,
+ if (!oideq(&dir->ss_excludes_file.oid,
&dir->untracked->ss_excludes_file.oid)) {
invalidate_gitignore(dir->untracked, root);
dir->untracked->ss_excludes_file = dir->ss_excludes_file;
@@ -2268,10 +2268,13 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
const char *path, int len, const struct pathspec *pathspec)
{
struct untracked_cache_dir *untracked;
- uint64_t start = getnanotime();
- if (has_symlink_leading_path(path, len))
+ trace_performance_enter();
+
+ if (has_symlink_leading_path(path, len)) {
+ trace_performance_leave("read directory %.*s", len, path);
return dir->nr;
+ }
untracked = validate_untracked_cache(dir, len, pathspec);
if (!untracked)
@@ -2307,7 +2310,7 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
dir->nr = i;
}
- trace_performance_since(start, "read directory %.*s", len, path);
+ trace_performance_leave("read directory %.*s", len, path);
if (dir->untracked) {
static int force_untracked_cache = -1;
static struct trace_key trace_untracked_stats = TRACE_KEY_INIT(UNTRACKED_STATS);
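
Like diff-lib.c earlier in this patch, dir.c switches from the one-shot getnanotime()/trace_performance_since() pattern to paired enter/leave calls, which can nest. A minimal sketch (assuming git's trace API via cache.h):

    #include "cache.h"

    /* Sketch only: nested regions are reported under their own labels
     * instead of being folded into the caller's timing. */
    static void timed_inner(void)
    {
        trace_performance_enter();
        /* ... leaf work ... */
        trace_performance_leave("inner work");
    }

    static void timed_outer(void)
    {
        trace_performance_enter();
        timed_inner();
        trace_performance_leave("outer work");
    }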
diff --git a/entry.c b/entry.c
index 2a2ab6c839..5d136c5d55 100644
--- a/entry.c
+++ b/entry.c
@@ -399,6 +399,34 @@ static int check_path(const char *path, int len, struct stat *st, int skiplen)
return lstat(path, st);
}
+static void mark_colliding_entries(const struct checkout *state,
+ struct cache_entry *ce, struct stat *st)
+{
+ int i, trust_ino = check_stat;
+
+#if defined(GIT_WINDOWS_NATIVE)
+ trust_ino = 0;
+#endif
+
+ ce->ce_flags |= CE_MATCHED;
+
+ for (i = 0; i < state->istate->cache_nr; i++) {
+ struct cache_entry *dup = state->istate->cache[i];
+
+ if (dup == ce)
+ break;
+
+ if (dup->ce_flags & (CE_MATCHED | CE_VALID | CE_SKIP_WORKTREE))
+ continue;
+
+ if ((trust_ino && dup->ce_stat_data.sd_ino == st->st_ino) ||
+ (!trust_ino && !fspathcmp(ce->name, dup->name))) {
+ dup->ce_flags |= CE_MATCHED;
+ break;
+ }
+ }
+}
+
/*
* Write the contents from ce out to the working tree.
*
@@ -456,6 +484,9 @@ int checkout_entry(struct cache_entry *ce,
return -1;
}
+ if (state->clone)
+ mark_colliding_entries(state, ce, &st);
+
/*
* We unlink the old file, to get the new one with the
* right permissions (including umask, which is nasty
diff --git a/fast-import.c b/fast-import.c
index 89bb0c9db3..95600c78e0 100644
--- a/fast-import.c
+++ b/fast-import.c
@@ -171,6 +171,7 @@ Format of STDIN stream:
#include "packfile.h"
#include "object-store.h"
#include "mem-pool.h"
+#include "commit-reach.h"
#define PACK_ID_BITS 16
#define MAX_PACK_ID ((1<<PACK_ID_BITS)-1)
@@ -572,7 +573,7 @@ static struct object_entry *find_object(struct object_id *oid)
unsigned int h = oid->hash[0] << 8 | oid->hash[1];
struct object_entry *e;
for (e = object_table[h]; e; e = e->next)
- if (!oidcmp(oid, &e->idx.oid))
+ if (oideq(oid, &e->idx.oid))
return e;
return NULL;
}
@@ -583,7 +584,7 @@ static struct object_entry *insert_object(struct object_id *oid)
struct object_entry *e = object_table[h];
while (e) {
- if (!oidcmp(oid, &e->idx.oid))
+ if (oideq(oid, &e->idx.oid))
return e;
e = e->next;
}
@@ -1068,7 +1069,7 @@ static int store_object(
duplicate_count_by_type[type]++;
return 1;
} else if (find_sha1_pack(oid.hash,
- get_packed_git(the_repository))) {
+ get_all_packs(the_repository))) {
e->type = type;
e->pack_id = MAX_PACK_ID;
e->idx.offset = 1; /* just not zero! */
@@ -1266,7 +1267,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
truncate_pack(&checkpoint);
} else if (find_sha1_pack(oid.hash,
- get_packed_git(the_repository))) {
+ get_all_packs(the_repository))) {
e->type = OBJ_BLOB;
e->pack_id = MAX_PACK_ID;
e->idx.offset = 1; /* just not zero! */
@@ -1533,7 +1534,7 @@ static int tree_content_set(
if (!*slash1) {
if (!S_ISDIR(mode)
&& e->versions[1].mode == mode
- && !oidcmp(&e->versions[1].oid, oid))
+ && oideq(&e->versions[1].oid, oid))
return 0;
e->versions[1].mode = mode;
oidcpy(&e->versions[1].oid, oid);
@@ -2649,7 +2650,7 @@ static int parse_from(struct branch *b)
struct object_entry *oe = find_mark(idnum);
if (oe->type != OBJ_COMMIT)
die("Mark :%" PRIuMAX " not a commit", idnum);
- if (oidcmp(&b->oid, &oe->idx.oid)) {
+ if (!oideq(&b->oid, &oe->idx.oid)) {
oidcpy(&b->oid, &oe->idx.oid);
if (oe->pack_id != MAX_PACK_ID) {
unsigned long size;
@@ -2667,7 +2668,7 @@ static int parse_from(struct branch *b)
else
die("Invalid ref name or SHA1 expression: %s", from);
- if (b->branch_tree.tree && oidcmp(&oid, &b->branch_tree.versions[1].oid)) {
+ if (b->branch_tree.tree && !oideq(&oid, &b->branch_tree.versions[1].oid)) {
release_tree_content_recursive(b->branch_tree.tree);
b->branch_tree.tree = NULL;
}
diff --git a/fetch-pack.c b/fetch-pack.c
index 88a078e9be..75047a4b2a 100644
--- a/fetch-pack.c
+++ b/fetch-pack.c
@@ -599,7 +599,7 @@ static void filter_refs(struct fetch_pack_args *args,
continue;
if (parse_oid_hex(ref->name, &oid, &p) ||
*p != '\0' ||
- oidcmp(&oid, &ref->old_oid))
+ !oideq(&oid, &ref->old_oid))
continue;
if ((allow_unadvertised_object_request &
diff --git a/git-submodule.sh b/git-submodule.sh
index 1cb2c0a31b..1b568e29b9 100755
--- a/git-submodule.sh
+++ b/git-submodule.sh
@@ -534,31 +534,19 @@ cmd_update()
"$@" || echo "#unmatched" $?
} | {
err=
- while read -r mode sha1 stage just_cloned sm_path
+ while read -r quickabort sha1 just_cloned sm_path
do
- die_if_unmatched "$mode" "$sha1"
+ die_if_unmatched "$quickabort" "$sha1"
- name=$(git submodule--helper name "$sm_path") || exit
- if ! test -z "$update"
- then
- update_module=$update
- else
- update_module=$(git config submodule."$name".update)
- if test -z "$update_module"
- then
- update_module="checkout"
- fi
- fi
+ git submodule--helper ensure-core-worktree "$sm_path"
+
+ update_module=$(git submodule--helper update-module-mode $just_cloned "$sm_path" $update)
displaypath=$(git submodule--helper relative-path "$prefix$sm_path" "$wt_prefix")
if test $just_cloned -eq 1
then
subsha1=
- case "$update_module" in
- merge | rebase | none)
- update_module=checkout ;;
- esac
else
subsha1=$(sanitize_submodule_env; cd "$sm_path" &&
git rev-parse --verify HEAD) ||
@@ -630,7 +618,7 @@ cmd_update()
must_die_on_failure=yes
;;
*)
- die "$(eval_gettext "Invalid update mode '$update_module' for submodule '$name'")"
+ die "$(eval_gettext "Invalid update mode '$update_module' for submodule path '$path'")"
esac
if (sanitize_submodule_env; cd "$sm_path" && $command "$sha1")
diff --git a/git.c b/git.c
index c27c38738b..a6f4b44af5 100644
--- a/git.c
+++ b/git.c
@@ -508,6 +508,7 @@ static struct cmd_struct commands[] = {
{ "merge-tree", cmd_merge_tree, RUN_SETUP | NO_PARSEOPT },
{ "mktag", cmd_mktag, RUN_SETUP | NO_PARSEOPT },
{ "mktree", cmd_mktree, RUN_SETUP },
+ { "multi-pack-index", cmd_multi_pack_index, RUN_SETUP_GENTLY },
{ "mv", cmd_mv, RUN_SETUP | NEED_WORK_TREE },
{ "name-rev", cmd_name_rev, RUN_SETUP },
{ "notes", cmd_notes, RUN_SETUP },
diff --git a/http-backend.c b/http-backend.c
index 458642ef72..9e894f197f 100644
--- a/http-backend.c
+++ b/http-backend.c
@@ -595,13 +595,13 @@ static void get_info_packs(struct strbuf *hdr, char *arg)
size_t cnt = 0;
select_getanyfile(hdr);
- for (p = get_packed_git(the_repository); p; p = p->next) {
+ for (p = get_all_packs(the_repository); p; p = p->next) {
if (p->pack_local)
cnt++;
}
strbuf_grow(&buf, cnt * 53 + 2);
- for (p = get_packed_git(the_repository); p; p = p->next) {
+ for (p = get_all_packs(the_repository); p; p = p->next) {
if (p->pack_local)
strbuf_addf(&buf, "P %s\n", p->pack_name + objdirlen + 6);
}
diff --git a/http-push.c b/http-push.c
index 5eaf551b51..1bbb0cdb6d 100644
--- a/http-push.c
+++ b/http-push.c
@@ -14,7 +14,7 @@
#include "argv-array.h"
#include "packfile.h"
#include "object-store.h"
-
+#include "commit-reach.h"
#ifdef EXPAT_NEEDS_XMLPARSE_H
#include <xmlparse.h>
@@ -1859,7 +1859,7 @@ int cmd_main(int argc, const char **argv)
continue;
}
- if (!oidcmp(&ref->old_oid, &ref->peer_ref->new_oid)) {
+ if (oideq(&ref->old_oid, &ref->peer_ref->new_oid)) {
if (push_verbosely)
fprintf(stderr, "'%s': up-to-date\n", ref->name);
if (helper_status)
diff --git a/http-walker.c b/http-walker.c
index 7cdfb2f24c..b3334bf657 100644
--- a/http-walker.c
+++ b/http-walker.c
@@ -483,7 +483,7 @@ static int fetch_object(struct walker *walker, unsigned char *sha1)
list_for_each(pos, head) {
obj_req = list_entry(pos, struct object_request, node);
- if (!hashcmp(obj_req->oid.hash, sha1))
+ if (hasheq(obj_req->oid.hash, sha1))
break;
}
if (obj_req == NULL)
@@ -543,7 +543,7 @@ static int fetch_object(struct walker *walker, unsigned char *sha1)
} else if (req->zret != Z_STREAM_END) {
walker->corrupt_object_found++;
ret = error("File %s (%s) corrupt", hex, req->url);
- } else if (hashcmp(obj_req->oid.hash, req->real_sha1)) {
+ } else if (!hasheq(obj_req->oid.hash, req->real_sha1)) {
ret = error("File %s has bad hash", hex);
} else if (req->rename < 0) {
struct strbuf buf = STRBUF_INIT;
diff --git a/http.c b/http.c
index 4162860ee3..98ff122585 100644
--- a/http.c
+++ b/http.c
@@ -2394,7 +2394,7 @@ int finish_http_object_request(struct http_object_request *freq)
unlink_or_warn(freq->tmpfile.buf);
return -1;
}
- if (hashcmp(freq->sha1, freq->real_sha1)) {
+ if (!hasheq(freq->sha1, freq->real_sha1)) {
unlink_or_warn(freq->tmpfile.buf);
return -1;
}
diff --git a/interdiff.c b/interdiff.c
new file mode 100644
index 0000000000..c81d680a6c
--- /dev/null
+++ b/interdiff.c
@@ -0,0 +1,28 @@
+#include "cache.h"
+#include "commit.h"
+#include "revision.h"
+#include "interdiff.h"
+
+static struct strbuf *idiff_prefix_cb(struct diff_options *opt, void *data)
+{
+ return data;
+}
+
+void show_interdiff(struct rev_info *rev, int indent)
+{
+ struct diff_options opts;
+ struct strbuf prefix = STRBUF_INIT;
+
+ memcpy(&opts, &rev->diffopt, sizeof(opts));
+ opts.output_format = DIFF_FORMAT_PATCH;
+ opts.output_prefix = idiff_prefix_cb;
+ strbuf_addchars(&prefix, ' ', indent);
+ opts.output_prefix_data = &prefix;
+ diff_setup_done(&opts);
+
+ diff_tree_oid(rev->idiff_oid1, rev->idiff_oid2, "", &opts);
+ diffcore_std(&opts);
+ diff_flush(&opts);
+
+ strbuf_release(&prefix);
+}
diff --git a/interdiff.h b/interdiff.h
new file mode 100644
index 0000000000..01c730a5c9
--- /dev/null
+++ b/interdiff.h
@@ -0,0 +1,8 @@
+#ifndef INTERDIFF_H
+#define INTERDIFF_H
+
+struct rev_info;
+
+void show_interdiff(struct rev_info *, int indent);
+
+#endif
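
The new show_interdiff() reads its input from struct rev_info rather than from explicit arguments; the log-tree.c hunk further down calls it with an indent of 2 once rev->idiff_oid1 and rev->idiff_oid2 are set. A rough, hypothetical sketch of a caller, using the field names this series adds to rev_info (the helper itself is not part of the patch):

#include "revision.h"
#include "interdiff.h"

/* Hypothetical: show the diff between the old and new series tips. */
static void maybe_show_interdiff(struct rev_info *rev,
				 const struct object_id *old_tip,
				 const struct object_id *new_tip)
{
	rev->idiff_oid1 = old_tip;
	rev->idiff_oid2 = new_tip;
	rev->idiff_title = "Interdiff:";	/* printed separately by the log-tree caller */
	show_interdiff(rev, 2);			/* indent the patch text by two columns */
}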
diff --git a/ll-merge.c b/ll-merge.c
index 0e2800f7bb..1936fee9e1 100644
--- a/ll-merge.c
+++ b/ll-merge.c
@@ -371,13 +371,12 @@ int ll_merge(mmbuffer_t *result_buf,
if (!check)
check = attr_check_initl("merge", "conflict-marker-size", NULL);
- if (!git_check_attr(&the_index, path, check)) {
- ll_driver_name = check->items[0].value;
- if (check->items[1].value) {
- marker_size = atoi(check->items[1].value);
- if (marker_size <= 0)
- marker_size = DEFAULT_CONFLICT_MARKER_SIZE;
- }
+ git_check_attr(&the_index, path, check);
+ ll_driver_name = check->items[0].value;
+ if (check->items[1].value) {
+ marker_size = atoi(check->items[1].value);
+ if (marker_size <= 0)
+ marker_size = DEFAULT_CONFLICT_MARKER_SIZE;
}
driver = find_ll_merge_driver(ll_driver_name);
@@ -398,7 +397,8 @@ int ll_merge_marker_size(const char *path)
if (!check)
check = attr_check_initl("conflict-marker-size", NULL);
- if (!git_check_attr(&the_index, path, check) && check->items[0].value) {
+ git_check_attr(&the_index, path, check);
+ if (check->items[0].value) {
marker_size = atoi(check->items[0].value);
if (marker_size <= 0)
marker_size = DEFAULT_CONFLICT_MARKER_SIZE;
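
ll-merge no longer looks at a return value from git_check_attr(): elsewhere in this series the function is made infallible, and attributes that are not set simply come back as NULL in check->items[]. The resulting pattern, essentially a restatement of ll_merge_marker_size() above (sketch assumes cache.h, attr.h and ll-merge.h):

#include "cache.h"
#include "attr.h"
#include "ll-merge.h"

static int conflict_marker_size_for(const char *path)
{
	static struct attr_check *check;
	int size = DEFAULT_CONFLICT_MARKER_SIZE;

	if (!check)
		check = attr_check_initl("conflict-marker-size", NULL);

	git_check_attr(&the_index, path, check);	/* cannot fail */
	if (check->items[0].value)
		size = atoi(check->items[0].value);
	return size > 0 ? size : DEFAULT_CONFLICT_MARKER_SIZE;
}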
diff --git a/lockfile.h b/lockfile.h
index f401c979f0..35403ccc0d 100644
--- a/lockfile.h
+++ b/lockfile.h
@@ -263,8 +263,8 @@ static inline int close_lock_file_gently(struct lock_file *lk)
* nobody else) to inspect the contents you wrote, while still
* holding the lock yourself.
*
- * * `reopen_lock_file()` to reopen the lockfile. Make further updates
- * to the contents.
+ * * `reopen_lock_file()` to reopen the lockfile, truncating the existing
+ * contents. Write out the new contents.
*
* * `commit_lock_file()` to make the final version permanent.
*/
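
The corrected comment matters because reopen_lock_file() truncates the tempfile: after reopening, the caller writes the complete new contents rather than appending to what is already there. A hedged sketch of the close/inspect/reopen cycle the comment describes (error handling kept minimal):

#include "cache.h"
#include "lockfile.h"

static int update_file_twice(const char *path)
{
	struct lock_file lk = LOCK_INIT;
	int fd = hold_lock_file_for_update(&lk, path, LOCK_DIE_ON_ERROR);

	write_in_full(fd, "draft\n", 6);
	if (close_lock_file_gently(&lk) < 0)
		return -1;		/* others may now inspect the draft */

	fd = reopen_lock_file(&lk);	/* truncates the lockfile */
	if (fd < 0)
		return -1;
	write_in_full(fd, "final\n", 6);
	return commit_lock_file(&lk);	/* rename into place */
}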
diff --git a/log-tree.c b/log-tree.c
index 7443e5fcc7..7a83e99250 100644
--- a/log-tree.c
+++ b/log-tree.c
@@ -15,6 +15,8 @@
#include "sequencer.h"
#include "line-log.h"
#include "help.h"
+#include "interdiff.h"
+#include "range-diff.h"
static struct decoration name_decoration = { "object names" };
static int decoration_loaded;
@@ -472,7 +474,7 @@ static int which_parent(const struct object_id *oid, const struct commit *commit
const struct commit_list *parent;
for (nth = 0, parent = commit->parents; parent; parent = parent->next) {
- if (!oidcmp(&parent->item->object.oid, oid))
+ if (oideq(&parent->item->object.oid, oid))
return nth;
nth++;
}
@@ -506,8 +508,8 @@ static int show_one_mergetag(struct commit *commit,
if (parse_tag_buffer(the_repository, tag, extra->value, extra->len))
strbuf_addstr(&verify_message, "malformed mergetag\n");
else if (is_common_merge(commit) &&
- !oidcmp(&tag->tagged->oid,
- &commit->parents->next->item->object.oid))
+ oideq(&tag->tagged->oid,
+ &commit->parents->next->item->object.oid))
strbuf_addf(&verify_message,
"merged tag '%s'\n", tag->tag);
else if ((nth = which_parent(&tag->tagged->oid, commit)) < 0)
@@ -542,6 +544,16 @@ static int show_mergetag(struct rev_info *opt, struct commit *commit)
return for_each_mergetag(show_one_mergetag, commit, opt);
}
+static void next_commentary_block(struct rev_info *opt, struct strbuf *sb)
+{
+ const char *x = opt->shown_dashes ? "\n" : "---\n";
+ if (sb)
+ strbuf_addstr(sb, x);
+ else
+ fputs(x, opt->diffopt.file);
+ opt->shown_dashes = 1;
+}
+
void show_log(struct rev_info *opt)
{
struct strbuf msgbuf = STRBUF_INIT;
@@ -699,10 +711,8 @@ void show_log(struct rev_info *opt)
if ((ctx.fmt != CMIT_FMT_USERFORMAT) &&
ctx.notes_message && *ctx.notes_message) {
- if (cmit_fmt_is_mail(ctx.fmt)) {
- strbuf_addstr(&msgbuf, "---\n");
- opt->shown_dashes = 1;
- }
+ if (cmit_fmt_is_mail(ctx.fmt))
+ next_commentary_block(opt, &msgbuf);
strbuf_addstr(&msgbuf, ctx.notes_message);
}
@@ -729,6 +739,33 @@ void show_log(struct rev_info *opt)
strbuf_release(&msgbuf);
free(ctx.notes_message);
+
+ if (cmit_fmt_is_mail(ctx.fmt) && opt->idiff_oid1) {
+ struct diff_queue_struct dq;
+
+ memcpy(&dq, &diff_queued_diff, sizeof(diff_queued_diff));
+ DIFF_QUEUE_CLEAR(&diff_queued_diff);
+
+ next_commentary_block(opt, NULL);
+ fprintf_ln(opt->diffopt.file, "%s", opt->idiff_title);
+ show_interdiff(opt, 2);
+
+ memcpy(&diff_queued_diff, &dq, sizeof(diff_queued_diff));
+ }
+
+ if (cmit_fmt_is_mail(ctx.fmt) && opt->rdiff1) {
+ struct diff_queue_struct dq;
+
+ memcpy(&dq, &diff_queued_diff, sizeof(diff_queued_diff));
+ DIFF_QUEUE_CLEAR(&diff_queued_diff);
+
+ next_commentary_block(opt, NULL);
+ fprintf_ln(opt->diffopt.file, "%s", opt->rdiff_title);
+ show_range_diff(opt->rdiff1, opt->rdiff2,
+ opt->creation_factor, 1, &opt->diffopt);
+
+ memcpy(&diff_queued_diff, &dq, sizeof(diff_queued_diff));
+ }
}
int log_tree_diff_flush(struct rev_info *opt)
@@ -766,9 +803,10 @@ int log_tree_diff_flush(struct rev_info *opt)
/*
* We may have shown three-dashes line early
- * between notes and the log message, in which
- * case we only want a blank line after the
- * notes without (an extra) three-dashes line.
+ * between generated commentary (notes, etc.)
+ * and the log message, in which case we only
+ * want a blank line after the commentary
+ * without (an extra) three-dashes line.
* Otherwise, we show the three-dashes line if
* we are showing the patch with diffstat, but
* in that case, there is no extra blank line
diff --git a/mailinfo.c b/mailinfo.c
index 3281a37d51..b395adbdf2 100644
--- a/mailinfo.c
+++ b/mailinfo.c
@@ -237,11 +237,22 @@ static int slurp_attr(const char *line, const char *name, struct strbuf *attr)
return 1;
}
+static int has_attr_value(const char *line, const char *name, const char *value)
+{
+ struct strbuf sb = STRBUF_INIT;
+ int rc = slurp_attr(line, name, &sb) && !strcasecmp(sb.buf, value);
+ strbuf_release(&sb);
+ return rc;
+}
+
static void handle_content_type(struct mailinfo *mi, struct strbuf *line)
{
struct strbuf *boundary = xmalloc(sizeof(struct strbuf));
strbuf_init(boundary, line->len);
+ mi->format_flowed = has_attr_value(line->buf, "format=", "flowed");
+ mi->delsp = has_attr_value(line->buf, "delsp=", "yes");
+
if (slurp_attr(line->buf, "boundary=", boundary)) {
strbuf_insert(boundary, 0, "--", 2);
if (++mi->content_top >= &mi->content[MAX_BOUNDARIES]) {
@@ -964,6 +975,52 @@ again:
return 1;
}
+static void handle_filter_flowed(struct mailinfo *mi, struct strbuf *line,
+ struct strbuf *prev)
+{
+ size_t len = line->len;
+ const char *rest;
+
+ if (!mi->format_flowed) {
+ handle_filter(mi, line);
+ return;
+ }
+
+ if (line->buf[len - 1] == '\n') {
+ len--;
+ if (len && line->buf[len - 1] == '\r')
+ len--;
+ }
+
+ /* Keep signature separator as-is. */
+ if (skip_prefix(line->buf, "-- ", &rest) && rest - line->buf == len) {
+ if (prev->len) {
+ handle_filter(mi, prev);
+ strbuf_reset(prev);
+ }
+ handle_filter(mi, line);
+ return;
+ }
+
+ /* Unstuff space-stuffed line. */
+ if (len && line->buf[0] == ' ') {
+ strbuf_remove(line, 0, 1);
+ len--;
+ }
+
+ /* Save flowed line for later, but without the soft line break. */
+ if (len && line->buf[len - 1] == ' ') {
+ strbuf_add(prev, line->buf, len - !!mi->delsp);
+ return;
+ }
+
+ /* Prepend any previous partial lines */
+ strbuf_insert(line, 0, prev->buf, prev->len);
+ strbuf_reset(prev);
+
+ handle_filter(mi, line);
+}
+
static void handle_body(struct mailinfo *mi, struct strbuf *line)
{
struct strbuf prev = STRBUF_INIT;
@@ -1012,7 +1069,7 @@ static void handle_body(struct mailinfo *mi, struct strbuf *line)
strbuf_addbuf(&prev, sb);
break;
}
- handle_filter(mi, sb);
+ handle_filter_flowed(mi, sb, &prev);
}
/*
* The partial chunk is saved in "prev" and will be
@@ -1022,13 +1079,16 @@ static void handle_body(struct mailinfo *mi, struct strbuf *line)
break;
}
default:
- handle_filter(mi, line);
+ handle_filter_flowed(mi, line, &prev);
}
if (mi->input_error)
break;
} while (!strbuf_getwholeline(line, mi->input, '\n'));
+ if (prev.len)
+ handle_filter(mi, &prev);
+
flush_inbody_header_accum(mi);
handle_body_out:
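
handle_filter_flowed() implements RFC 3676 ("format=flowed") bodies, which are only in effect when the Content-Type header announced them, e.g.:

	Content-Type: text/plain; format=flowed; delsp=yes

A line ending in a space is a soft line break and is joined with the next line; a leading space is space-stuffing and is stripped; the "-- " signature separator is passed through untouched; and any partial line still sitting in "prev" when the body ends is flushed by the new handle_filter() call after the loop. Illustrative join (not taken from the patch):

	"This is a very long \n" + "paragraph.\n"
	  -> "This is a very long paragraph.\n"     (format=flowed)

With delsp=yes the space that marks the soft break is deleted as well, which is why handle_filter_flowed() copies one byte less into "prev" in that case.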
diff --git a/mailinfo.h b/mailinfo.h
index 766c03dd1d..6830e1e625 100644
--- a/mailinfo.h
+++ b/mailinfo.h
@@ -22,6 +22,8 @@ struct mailinfo {
struct strbuf *content[MAX_BOUNDARIES];
struct strbuf **content_top;
struct strbuf charset;
+ unsigned int format_flowed:1;
+ unsigned int delsp:1;
char *message_id;
enum {
TE_DONTCARE, TE_QP, TE_BASE64
diff --git a/match-trees.c b/match-trees.c
index 37653308d3..2b6d31ef9d 100644
--- a/match-trees.c
+++ b/match-trees.c
@@ -106,7 +106,7 @@ static int score_trees(const struct object_id *hash1, const struct object_id *ha
update_tree_entry(&two);
} else {
/* path appears in both */
- if (oidcmp(one.entry.oid, two.entry.oid)) {
+ if (!oideq(one.entry.oid, two.entry.oid)) {
/* they are different */
score += score_differs(one.entry.mode,
two.entry.mode,
diff --git a/merge-recursive.c b/merge-recursive.c
index e5243dbc54..fa87341e67 100644
--- a/merge-recursive.c
+++ b/merge-recursive.c
@@ -27,6 +27,7 @@
#include "dir.h"
#include "submodule.h"
#include "revision.h"
+#include "commit-reach.h"
struct path_hashmap_entry {
struct hashmap_entry e;
@@ -156,7 +157,7 @@ static struct tree *shift_tree_object(struct tree *one, struct tree *two,
shift_tree_by(&one->object.oid, &two->object.oid, &shifted,
subtree_shift);
}
- if (!oidcmp(&two->object.oid, &shifted))
+ if (oideq(&two->object.oid, &shifted))
return two;
return lookup_tree(the_repository, &shifted);
}
@@ -179,7 +180,7 @@ static int oid_eq(const struct object_id *a, const struct object_id *b)
{
if (!a && !b)
return 2;
- return a && b && oidcmp(a, b) == 0;
+ return a && b && oideq(a, b);
}
enum rename_type {
@@ -2239,7 +2240,7 @@ static struct dir_rename_entry *check_dir_renamed(const char *path,
{
char *temp = xstrdup(path);
char *end;
- struct dir_rename_entry *entry = NULL;;
+ struct dir_rename_entry *entry = NULL;
while ((end = strrchr(temp, '/'))) {
*end = '\0';
diff --git a/midx.c b/midx.c
new file mode 100644
index 0000000000..f3e8dbc108
--- /dev/null
+++ b/midx.c
@@ -0,0 +1,930 @@
+#include "cache.h"
+#include "config.h"
+#include "csum-file.h"
+#include "dir.h"
+#include "lockfile.h"
+#include "packfile.h"
+#include "object-store.h"
+#include "sha1-lookup.h"
+#include "midx.h"
+
+#define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
+#define MIDX_VERSION 1
+#define MIDX_BYTE_FILE_VERSION 4
+#define MIDX_BYTE_HASH_VERSION 5
+#define MIDX_BYTE_NUM_CHUNKS 6
+#define MIDX_BYTE_NUM_PACKS 8
+#define MIDX_HASH_VERSION 1
+#define MIDX_HEADER_SIZE 12
+#define MIDX_HASH_LEN 20
+#define MIDX_MIN_SIZE (MIDX_HEADER_SIZE + MIDX_HASH_LEN)
+
+#define MIDX_MAX_CHUNKS 5
+#define MIDX_CHUNK_ALIGNMENT 4
+#define MIDX_CHUNKID_PACKNAMES 0x504e414d /* "PNAM" */
+#define MIDX_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */
+#define MIDX_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */
+#define MIDX_CHUNKID_OBJECTOFFSETS 0x4f4f4646 /* "OOFF" */
+#define MIDX_CHUNKID_LARGEOFFSETS 0x4c4f4646 /* "LOFF" */
+#define MIDX_CHUNKLOOKUP_WIDTH (sizeof(uint32_t) + sizeof(uint64_t))
+#define MIDX_CHUNK_FANOUT_SIZE (sizeof(uint32_t) * 256)
+#define MIDX_CHUNK_OFFSET_WIDTH (2 * sizeof(uint32_t))
+#define MIDX_CHUNK_LARGE_OFFSET_WIDTH (sizeof(uint64_t))
+#define MIDX_LARGE_OFFSET_NEEDED 0x80000000
+
+static char *get_midx_filename(const char *object_dir)
+{
+ return xstrfmt("%s/pack/multi-pack-index", object_dir);
+}
+
+struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local)
+{
+ struct multi_pack_index *m = NULL;
+ int fd;
+ struct stat st;
+ size_t midx_size;
+ void *midx_map = NULL;
+ uint32_t hash_version;
+ char *midx_name = get_midx_filename(object_dir);
+ uint32_t i;
+ const char *cur_pack_name;
+
+ fd = git_open(midx_name);
+
+ if (fd < 0)
+ goto cleanup_fail;
+ if (fstat(fd, &st)) {
+ error_errno(_("failed to read %s"), midx_name);
+ goto cleanup_fail;
+ }
+
+ midx_size = xsize_t(st.st_size);
+
+ if (midx_size < MIDX_MIN_SIZE) {
+ error(_("multi-pack-index file %s is too small"), midx_name);
+ goto cleanup_fail;
+ }
+
+ FREE_AND_NULL(midx_name);
+
+ midx_map = xmmap(NULL, midx_size, PROT_READ, MAP_PRIVATE, fd, 0);
+
+ FLEX_ALLOC_MEM(m, object_dir, object_dir, strlen(object_dir));
+ m->fd = fd;
+ m->data = midx_map;
+ m->data_len = midx_size;
+ m->local = local;
+
+ m->signature = get_be32(m->data);
+ if (m->signature != MIDX_SIGNATURE) {
+ error(_("multi-pack-index signature 0x%08x does not match signature 0x%08x"),
+ m->signature, MIDX_SIGNATURE);
+ goto cleanup_fail;
+ }
+
+ m->version = m->data[MIDX_BYTE_FILE_VERSION];
+ if (m->version != MIDX_VERSION) {
+ error(_("multi-pack-index version %d not recognized"),
+ m->version);
+ goto cleanup_fail;
+ }
+
+ hash_version = m->data[MIDX_BYTE_HASH_VERSION];
+ if (hash_version != MIDX_HASH_VERSION) {
+ error(_("hash version %u does not match"), hash_version);
+ goto cleanup_fail;
+ }
+ m->hash_len = MIDX_HASH_LEN;
+
+ m->num_chunks = m->data[MIDX_BYTE_NUM_CHUNKS];
+
+ m->num_packs = get_be32(m->data + MIDX_BYTE_NUM_PACKS);
+
+ for (i = 0; i < m->num_chunks; i++) {
+ uint32_t chunk_id = get_be32(m->data + MIDX_HEADER_SIZE +
+ MIDX_CHUNKLOOKUP_WIDTH * i);
+ uint64_t chunk_offset = get_be64(m->data + MIDX_HEADER_SIZE + 4 +
+ MIDX_CHUNKLOOKUP_WIDTH * i);
+
+ switch (chunk_id) {
+ case MIDX_CHUNKID_PACKNAMES:
+ m->chunk_pack_names = m->data + chunk_offset;
+ break;
+
+ case MIDX_CHUNKID_OIDFANOUT:
+ m->chunk_oid_fanout = (uint32_t *)(m->data + chunk_offset);
+ break;
+
+ case MIDX_CHUNKID_OIDLOOKUP:
+ m->chunk_oid_lookup = m->data + chunk_offset;
+ break;
+
+ case MIDX_CHUNKID_OBJECTOFFSETS:
+ m->chunk_object_offsets = m->data + chunk_offset;
+ break;
+
+ case MIDX_CHUNKID_LARGEOFFSETS:
+ m->chunk_large_offsets = m->data + chunk_offset;
+ break;
+
+ case 0:
+ die(_("terminating multi-pack-index chunk id appears earlier than expected"));
+ break;
+
+ default:
+ /*
+ * Do nothing on unrecognized chunks, allowing future
+ * extensions to add optional chunks.
+ */
+ break;
+ }
+ }
+
+ if (!m->chunk_pack_names)
+ die(_("multi-pack-index missing required pack-name chunk"));
+ if (!m->chunk_oid_fanout)
+ die(_("multi-pack-index missing required OID fanout chunk"));
+ if (!m->chunk_oid_lookup)
+ die(_("multi-pack-index missing required OID lookup chunk"));
+ if (!m->chunk_object_offsets)
+ die(_("multi-pack-index missing required object offsets chunk"));
+
+ m->num_objects = ntohl(m->chunk_oid_fanout[255]);
+
+ m->pack_names = xcalloc(m->num_packs, sizeof(*m->pack_names));
+ m->packs = xcalloc(m->num_packs, sizeof(*m->packs));
+
+ cur_pack_name = (const char *)m->chunk_pack_names;
+ for (i = 0; i < m->num_packs; i++) {
+ m->pack_names[i] = cur_pack_name;
+
+ cur_pack_name += strlen(cur_pack_name) + 1;
+
+ if (i && strcmp(m->pack_names[i], m->pack_names[i - 1]) <= 0) {
+ error(_("multi-pack-index pack names out of order: '%s' before '%s'"),
+ m->pack_names[i - 1],
+ m->pack_names[i]);
+ goto cleanup_fail;
+ }
+ }
+
+ return m;
+
+cleanup_fail:
+ free(m);
+ free(midx_name);
+ if (midx_map)
+ munmap(midx_map, midx_size);
+ if (0 <= fd)
+ close(fd);
+ return NULL;
+}
+
+static void close_midx(struct multi_pack_index *m)
+{
+ uint32_t i;
+ munmap((unsigned char *)m->data, m->data_len);
+ close(m->fd);
+ m->fd = -1;
+
+ for (i = 0; i < m->num_packs; i++) {
+ if (m->packs[i]) {
+ close_pack(m->packs[i]);
+ free(m->packs);
+ }
+ }
+ FREE_AND_NULL(m->packs);
+ FREE_AND_NULL(m->pack_names);
+}
+
+int prepare_midx_pack(struct multi_pack_index *m, uint32_t pack_int_id)
+{
+ struct strbuf pack_name = STRBUF_INIT;
+
+ if (pack_int_id >= m->num_packs)
+ BUG("bad pack-int-id");
+
+ if (m->packs[pack_int_id])
+ return 0;
+
+ strbuf_addf(&pack_name, "%s/pack/%s", m->object_dir,
+ m->pack_names[pack_int_id]);
+
+ m->packs[pack_int_id] = add_packed_git(pack_name.buf, pack_name.len, m->local);
+ strbuf_release(&pack_name);
+ return !m->packs[pack_int_id];
+}
+
+int bsearch_midx(const struct object_id *oid, struct multi_pack_index *m, uint32_t *result)
+{
+ return bsearch_hash(oid->hash, m->chunk_oid_fanout, m->chunk_oid_lookup,
+ MIDX_HASH_LEN, result);
+}
+
+struct object_id *nth_midxed_object_oid(struct object_id *oid,
+ struct multi_pack_index *m,
+ uint32_t n)
+{
+ if (n >= m->num_objects)
+ return NULL;
+
+ hashcpy(oid->hash, m->chunk_oid_lookup + m->hash_len * n);
+ return oid;
+}
+
+static off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos)
+{
+ const unsigned char *offset_data;
+ uint32_t offset32;
+
+ offset_data = m->chunk_object_offsets + pos * MIDX_CHUNK_OFFSET_WIDTH;
+ offset32 = get_be32(offset_data + sizeof(uint32_t));
+
+ if (m->chunk_large_offsets && offset32 & MIDX_LARGE_OFFSET_NEEDED) {
+ if (sizeof(off_t) < sizeof(uint64_t))
+ die(_("multi-pack-index stores a 64-bit offset, but off_t is too small"));
+
+ offset32 ^= MIDX_LARGE_OFFSET_NEEDED;
+ return get_be64(m->chunk_large_offsets + sizeof(uint64_t) * offset32);
+ }
+
+ return offset32;
+}
+
+static uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos)
+{
+ return get_be32(m->chunk_object_offsets + pos * MIDX_CHUNK_OFFSET_WIDTH);
+}
+
+static int nth_midxed_pack_entry(struct multi_pack_index *m, struct pack_entry *e, uint32_t pos)
+{
+ uint32_t pack_int_id;
+ struct packed_git *p;
+
+ if (pos >= m->num_objects)
+ return 0;
+
+ pack_int_id = nth_midxed_pack_int_id(m, pos);
+
+ if (prepare_midx_pack(m, pack_int_id))
+ die(_("error preparing packfile from multi-pack-index"));
+ p = m->packs[pack_int_id];
+
+ /*
+ * We are about to tell the caller where they can locate the
+ * requested object. We better make sure the packfile is
+ * still here and can be accessed before supplying that
+ * answer, as it may have been deleted since the MIDX was
+ * loaded!
+ */
+ if (!is_pack_valid(p))
+ return 0;
+
+ if (p->num_bad_objects) {
+ uint32_t i;
+ struct object_id oid;
+ nth_midxed_object_oid(&oid, m, pos);
+ for (i = 0; i < p->num_bad_objects; i++)
+ if (!hashcmp(oid.hash,
+ p->bad_object_sha1 + the_hash_algo->rawsz * i))
+ return 0;
+ }
+
+ e->offset = nth_midxed_offset(m, pos);
+ e->p = p;
+
+ return 1;
+}
+
+int fill_midx_entry(const struct object_id *oid, struct pack_entry *e, struct multi_pack_index *m)
+{
+ uint32_t pos;
+
+ if (!bsearch_midx(oid, m, &pos))
+ return 0;
+
+ return nth_midxed_pack_entry(m, e, pos);
+}
+
+int midx_contains_pack(struct multi_pack_index *m, const char *idx_name)
+{
+ uint32_t first = 0, last = m->num_packs;
+
+ while (first < last) {
+ uint32_t mid = first + (last - first) / 2;
+ const char *current;
+ int cmp;
+
+ current = m->pack_names[mid];
+ cmp = strcmp(idx_name, current);
+ if (!cmp)
+ return 1;
+ if (cmp > 0) {
+ first = mid + 1;
+ continue;
+ }
+ last = mid;
+ }
+
+ return 0;
+}
+
+int prepare_multi_pack_index_one(struct repository *r, const char *object_dir, int local)
+{
+ struct multi_pack_index *m;
+ struct multi_pack_index *m_search;
+ int config_value;
+
+ if (repo_config_get_bool(r, "core.multipackindex", &config_value) ||
+ !config_value)
+ return 0;
+
+ for (m_search = r->objects->multi_pack_index; m_search; m_search = m_search->next)
+ if (!strcmp(object_dir, m_search->object_dir))
+ return 1;
+
+ m = load_multi_pack_index(object_dir, local);
+
+ if (m) {
+ m->next = r->objects->multi_pack_index;
+ r->objects->multi_pack_index = m;
+ return 1;
+ }
+
+ return 0;
+}
+
+static size_t write_midx_header(struct hashfile *f,
+ unsigned char num_chunks,
+ uint32_t num_packs)
+{
+ unsigned char byte_values[4];
+
+ hashwrite_be32(f, MIDX_SIGNATURE);
+ byte_values[0] = MIDX_VERSION;
+ byte_values[1] = MIDX_HASH_VERSION;
+ byte_values[2] = num_chunks;
+ byte_values[3] = 0; /* unused */
+ hashwrite(f, byte_values, sizeof(byte_values));
+ hashwrite_be32(f, num_packs);
+
+ return MIDX_HEADER_SIZE;
+}
+
+struct pack_list {
+ struct packed_git **list;
+ char **names;
+ uint32_t nr;
+ uint32_t alloc_list;
+ uint32_t alloc_names;
+ size_t pack_name_concat_len;
+ struct multi_pack_index *m;
+};
+
+static void add_pack_to_midx(const char *full_path, size_t full_path_len,
+ const char *file_name, void *data)
+{
+ struct pack_list *packs = (struct pack_list *)data;
+
+ if (ends_with(file_name, ".idx")) {
+ if (packs->m && midx_contains_pack(packs->m, file_name))
+ return;
+
+ ALLOC_GROW(packs->list, packs->nr + 1, packs->alloc_list);
+ ALLOC_GROW(packs->names, packs->nr + 1, packs->alloc_names);
+
+ packs->list[packs->nr] = add_packed_git(full_path,
+ full_path_len,
+ 0);
+
+ if (!packs->list[packs->nr]) {
+ warning(_("failed to add packfile '%s'"),
+ full_path);
+ return;
+ }
+
+ if (open_pack_index(packs->list[packs->nr])) {
+ warning(_("failed to open pack-index '%s'"),
+ full_path);
+ close_pack(packs->list[packs->nr]);
+ FREE_AND_NULL(packs->list[packs->nr]);
+ return;
+ }
+
+ packs->names[packs->nr] = xstrdup(file_name);
+ packs->pack_name_concat_len += strlen(file_name) + 1;
+ packs->nr++;
+ }
+}
+
+struct pack_pair {
+ uint32_t pack_int_id;
+ char *pack_name;
+};
+
+static int pack_pair_compare(const void *_a, const void *_b)
+{
+ struct pack_pair *a = (struct pack_pair *)_a;
+ struct pack_pair *b = (struct pack_pair *)_b;
+ return strcmp(a->pack_name, b->pack_name);
+}
+
+static void sort_packs_by_name(char **pack_names, uint32_t nr_packs, uint32_t *perm)
+{
+ uint32_t i;
+ struct pack_pair *pairs;
+
+ ALLOC_ARRAY(pairs, nr_packs);
+
+ for (i = 0; i < nr_packs; i++) {
+ pairs[i].pack_int_id = i;
+ pairs[i].pack_name = pack_names[i];
+ }
+
+ QSORT(pairs, nr_packs, pack_pair_compare);
+
+ for (i = 0; i < nr_packs; i++) {
+ pack_names[i] = pairs[i].pack_name;
+ perm[pairs[i].pack_int_id] = i;
+ }
+
+ free(pairs);
+}
+
+struct pack_midx_entry {
+ struct object_id oid;
+ uint32_t pack_int_id;
+ time_t pack_mtime;
+ uint64_t offset;
+};
+
+static int midx_oid_compare(const void *_a, const void *_b)
+{
+ const struct pack_midx_entry *a = (const struct pack_midx_entry *)_a;
+ const struct pack_midx_entry *b = (const struct pack_midx_entry *)_b;
+ int cmp = oidcmp(&a->oid, &b->oid);
+
+ if (cmp)
+ return cmp;
+
+ if (a->pack_mtime > b->pack_mtime)
+ return -1;
+ else if (a->pack_mtime < b->pack_mtime)
+ return 1;
+
+ return a->pack_int_id - b->pack_int_id;
+}
+
+static int nth_midxed_pack_midx_entry(struct multi_pack_index *m,
+ uint32_t *pack_perm,
+ struct pack_midx_entry *e,
+ uint32_t pos)
+{
+ if (pos >= m->num_objects)
+ return 1;
+
+ nth_midxed_object_oid(&e->oid, m, pos);
+ e->pack_int_id = pack_perm[nth_midxed_pack_int_id(m, pos)];
+ e->offset = nth_midxed_offset(m, pos);
+
+ /* consider objects in midx to be from "old" packs */
+ e->pack_mtime = 0;
+ return 0;
+}
+
+static void fill_pack_entry(uint32_t pack_int_id,
+ struct packed_git *p,
+ uint32_t cur_object,
+ struct pack_midx_entry *entry)
+{
+ if (!nth_packed_object_oid(&entry->oid, p, cur_object))
+ die(_("failed to locate object %d in packfile"), cur_object);
+
+ entry->pack_int_id = pack_int_id;
+ entry->pack_mtime = p->mtime;
+
+ entry->offset = nth_packed_object_offset(p, cur_object);
+}
+
+/*
+ * It is possible to artificially get into a state where there are many
+ * duplicate copies of objects. That can create high memory pressure if
+ * we are to create a list of all objects before de-duplication. To reduce
+ * this memory pressure without a significant performance drop, automatically
+ * group objects by the first byte of their object id. Use the IDX fanout
+ * tables to group the data, copy to a local array, then sort.
+ *
+ * Copy only the de-duplicated entries (selected by most-recent modified time
+ * of a packfile containing the object).
+ */
+static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
+ struct packed_git **p,
+ uint32_t *perm,
+ uint32_t nr_packs,
+ uint32_t *nr_objects)
+{
+ uint32_t cur_fanout, cur_pack, cur_object;
+ uint32_t alloc_fanout, alloc_objects, total_objects = 0;
+ struct pack_midx_entry *entries_by_fanout = NULL;
+ struct pack_midx_entry *deduplicated_entries = NULL;
+ uint32_t start_pack = m ? m->num_packs : 0;
+
+ for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++)
+ total_objects += p[cur_pack]->num_objects;
+
+ /*
+ * As we de-duplicate by fanout value, we expect the fanout
+ * slices to be evenly distributed, with some noise. Hence,
+ * allocate slightly more than one 256th.
+ */
+ alloc_objects = alloc_fanout = total_objects > 3200 ? total_objects / 200 : 16;
+
+ ALLOC_ARRAY(entries_by_fanout, alloc_fanout);
+ ALLOC_ARRAY(deduplicated_entries, alloc_objects);
+ *nr_objects = 0;
+
+ for (cur_fanout = 0; cur_fanout < 256; cur_fanout++) {
+ uint32_t nr_fanout = 0;
+
+ if (m) {
+ uint32_t start = 0, end;
+
+ if (cur_fanout)
+ start = ntohl(m->chunk_oid_fanout[cur_fanout - 1]);
+ end = ntohl(m->chunk_oid_fanout[cur_fanout]);
+
+ for (cur_object = start; cur_object < end; cur_object++) {
+ ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout);
+ nth_midxed_pack_midx_entry(m, perm,
+ &entries_by_fanout[nr_fanout],
+ cur_object);
+ nr_fanout++;
+ }
+ }
+
+ for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++) {
+ uint32_t start = 0, end;
+
+ if (cur_fanout)
+ start = get_pack_fanout(p[cur_pack], cur_fanout - 1);
+ end = get_pack_fanout(p[cur_pack], cur_fanout);
+
+ for (cur_object = start; cur_object < end; cur_object++) {
+ ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout);
+ fill_pack_entry(perm[cur_pack], p[cur_pack], cur_object, &entries_by_fanout[nr_fanout]);
+ nr_fanout++;
+ }
+ }
+
+ QSORT(entries_by_fanout, nr_fanout, midx_oid_compare);
+
+ /*
+ * The batch is now sorted by OID and then mtime (descending).
+ * Take only the first duplicate.
+ */
+ for (cur_object = 0; cur_object < nr_fanout; cur_object++) {
+ if (cur_object && !oidcmp(&entries_by_fanout[cur_object - 1].oid,
+ &entries_by_fanout[cur_object].oid))
+ continue;
+
+ ALLOC_GROW(deduplicated_entries, *nr_objects + 1, alloc_objects);
+ memcpy(&deduplicated_entries[*nr_objects],
+ &entries_by_fanout[cur_object],
+ sizeof(struct pack_midx_entry));
+ (*nr_objects)++;
+ }
+ }
+
+ free(entries_by_fanout);
+ return deduplicated_entries;
+}
+
+static size_t write_midx_pack_names(struct hashfile *f,
+ char **pack_names,
+ uint32_t num_packs)
+{
+ uint32_t i;
+ unsigned char padding[MIDX_CHUNK_ALIGNMENT];
+ size_t written = 0;
+
+ for (i = 0; i < num_packs; i++) {
+ size_t writelen = strlen(pack_names[i]) + 1;
+
+ if (i && strcmp(pack_names[i], pack_names[i - 1]) <= 0)
+ BUG("incorrect pack-file order: %s before %s",
+ pack_names[i - 1],
+ pack_names[i]);
+
+ hashwrite(f, pack_names[i], writelen);
+ written += writelen;
+ }
+
+ /* add padding to be aligned */
+ i = MIDX_CHUNK_ALIGNMENT - (written % MIDX_CHUNK_ALIGNMENT);
+ if (i < MIDX_CHUNK_ALIGNMENT) {
+ memset(padding, 0, sizeof(padding));
+ hashwrite(f, padding, i);
+ written += i;
+ }
+
+ return written;
+}
+
+static size_t write_midx_oid_fanout(struct hashfile *f,
+ struct pack_midx_entry *objects,
+ uint32_t nr_objects)
+{
+ struct pack_midx_entry *list = objects;
+ struct pack_midx_entry *last = objects + nr_objects;
+ uint32_t count = 0;
+ uint32_t i;
+
+ /*
+ * Write the first-level table (the list is sorted,
+ * but we use a 256-entry lookup to be able to avoid
+ * having to do eight extra binary search iterations).
+ */
+ for (i = 0; i < 256; i++) {
+ struct pack_midx_entry *next = list;
+
+ while (next < last && next->oid.hash[0] == i) {
+ count++;
+ next++;
+ }
+
+ hashwrite_be32(f, count);
+ list = next;
+ }
+
+ return MIDX_CHUNK_FANOUT_SIZE;
+}
+
+static size_t write_midx_oid_lookup(struct hashfile *f, unsigned char hash_len,
+ struct pack_midx_entry *objects,
+ uint32_t nr_objects)
+{
+ struct pack_midx_entry *list = objects;
+ uint32_t i;
+ size_t written = 0;
+
+ for (i = 0; i < nr_objects; i++) {
+ struct pack_midx_entry *obj = list++;
+
+ if (i < nr_objects - 1) {
+ struct pack_midx_entry *next = list;
+ if (oidcmp(&obj->oid, &next->oid) >= 0)
+ BUG("OIDs not in order: %s >= %s",
+ oid_to_hex(&obj->oid),
+ oid_to_hex(&next->oid));
+ }
+
+ hashwrite(f, obj->oid.hash, (int)hash_len);
+ written += hash_len;
+ }
+
+ return written;
+}
+
+static size_t write_midx_object_offsets(struct hashfile *f, int large_offset_needed,
+ struct pack_midx_entry *objects, uint32_t nr_objects)
+{
+ struct pack_midx_entry *list = objects;
+ uint32_t i, nr_large_offset = 0;
+ size_t written = 0;
+
+ for (i = 0; i < nr_objects; i++) {
+ struct pack_midx_entry *obj = list++;
+
+ hashwrite_be32(f, obj->pack_int_id);
+
+ if (large_offset_needed && obj->offset >> 31)
+ hashwrite_be32(f, MIDX_LARGE_OFFSET_NEEDED | nr_large_offset++);
+ else if (!large_offset_needed && obj->offset >> 32)
+ BUG("object %s requires a large offset (%"PRIx64") but the MIDX is not writing large offsets!",
+ oid_to_hex(&obj->oid),
+ obj->offset);
+ else
+ hashwrite_be32(f, (uint32_t)obj->offset);
+
+ written += MIDX_CHUNK_OFFSET_WIDTH;
+ }
+
+ return written;
+}
+
+static size_t write_midx_large_offsets(struct hashfile *f, uint32_t nr_large_offset,
+ struct pack_midx_entry *objects, uint32_t nr_objects)
+{
+ struct pack_midx_entry *list = objects;
+ size_t written = 0;
+
+ while (nr_large_offset) {
+ struct pack_midx_entry *obj = list++;
+ uint64_t offset = obj->offset;
+
+ if (!(offset >> 31))
+ continue;
+
+ hashwrite_be32(f, offset >> 32);
+ hashwrite_be32(f, offset & 0xffffffffUL);
+ written += 2 * sizeof(uint32_t);
+
+ nr_large_offset--;
+ }
+
+ return written;
+}
+
+int write_midx_file(const char *object_dir)
+{
+ unsigned char cur_chunk, num_chunks = 0;
+ char *midx_name;
+ uint32_t i;
+ struct hashfile *f = NULL;
+ struct lock_file lk;
+ struct pack_list packs;
+ uint32_t *pack_perm = NULL;
+ uint64_t written = 0;
+ uint32_t chunk_ids[MIDX_MAX_CHUNKS + 1];
+ uint64_t chunk_offsets[MIDX_MAX_CHUNKS + 1];
+ uint32_t nr_entries, num_large_offsets = 0;
+ struct pack_midx_entry *entries = NULL;
+ int large_offsets_needed = 0;
+
+ midx_name = get_midx_filename(object_dir);
+ if (safe_create_leading_directories(midx_name)) {
+ UNLEAK(midx_name);
+ die_errno(_("unable to create leading directories of %s"),
+ midx_name);
+ }
+
+ packs.m = load_multi_pack_index(object_dir, 1);
+
+ packs.nr = 0;
+ packs.alloc_list = packs.m ? packs.m->num_packs : 16;
+ packs.alloc_names = packs.alloc_list;
+ packs.list = NULL;
+ packs.names = NULL;
+ packs.pack_name_concat_len = 0;
+ ALLOC_ARRAY(packs.list, packs.alloc_list);
+ ALLOC_ARRAY(packs.names, packs.alloc_names);
+
+ if (packs.m) {
+ for (i = 0; i < packs.m->num_packs; i++) {
+ ALLOC_GROW(packs.list, packs.nr + 1, packs.alloc_list);
+ ALLOC_GROW(packs.names, packs.nr + 1, packs.alloc_names);
+
+ packs.list[packs.nr] = NULL;
+ packs.names[packs.nr] = xstrdup(packs.m->pack_names[i]);
+ packs.pack_name_concat_len += strlen(packs.names[packs.nr]) + 1;
+ packs.nr++;
+ }
+ }
+
+ for_each_file_in_pack_dir(object_dir, add_pack_to_midx, &packs);
+
+ if (packs.m && packs.nr == packs.m->num_packs)
+ goto cleanup;
+
+ if (packs.pack_name_concat_len % MIDX_CHUNK_ALIGNMENT)
+ packs.pack_name_concat_len += MIDX_CHUNK_ALIGNMENT -
+ (packs.pack_name_concat_len % MIDX_CHUNK_ALIGNMENT);
+
+ ALLOC_ARRAY(pack_perm, packs.nr);
+ sort_packs_by_name(packs.names, packs.nr, pack_perm);
+
+ entries = get_sorted_entries(packs.m, packs.list, pack_perm, packs.nr, &nr_entries);
+
+ for (i = 0; i < nr_entries; i++) {
+ if (entries[i].offset > 0x7fffffff)
+ num_large_offsets++;
+ if (entries[i].offset > 0xffffffff)
+ large_offsets_needed = 1;
+ }
+
+ hold_lock_file_for_update(&lk, midx_name, LOCK_DIE_ON_ERROR);
+ f = hashfd(lk.tempfile->fd, lk.tempfile->filename.buf);
+ FREE_AND_NULL(midx_name);
+
+ if (packs.m)
+ close_midx(packs.m);
+
+ cur_chunk = 0;
+ num_chunks = large_offsets_needed ? 5 : 4;
+
+ written = write_midx_header(f, num_chunks, packs.nr);
+
+ chunk_ids[cur_chunk] = MIDX_CHUNKID_PACKNAMES;
+ chunk_offsets[cur_chunk] = written + (num_chunks + 1) * MIDX_CHUNKLOOKUP_WIDTH;
+
+ cur_chunk++;
+ chunk_ids[cur_chunk] = MIDX_CHUNKID_OIDFANOUT;
+ chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + packs.pack_name_concat_len;
+
+ cur_chunk++;
+ chunk_ids[cur_chunk] = MIDX_CHUNKID_OIDLOOKUP;
+ chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + MIDX_CHUNK_FANOUT_SIZE;
+
+ cur_chunk++;
+ chunk_ids[cur_chunk] = MIDX_CHUNKID_OBJECTOFFSETS;
+ chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + nr_entries * MIDX_HASH_LEN;
+
+ cur_chunk++;
+ chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + nr_entries * MIDX_CHUNK_OFFSET_WIDTH;
+ if (large_offsets_needed) {
+ chunk_ids[cur_chunk] = MIDX_CHUNKID_LARGEOFFSETS;
+
+ cur_chunk++;
+ chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] +
+ num_large_offsets * MIDX_CHUNK_LARGE_OFFSET_WIDTH;
+ }
+
+ chunk_ids[cur_chunk] = 0;
+
+ for (i = 0; i <= num_chunks; i++) {
+ if (i && chunk_offsets[i] < chunk_offsets[i - 1])
+ BUG("incorrect chunk offsets: %"PRIu64" before %"PRIu64,
+ chunk_offsets[i - 1],
+ chunk_offsets[i]);
+
+ if (chunk_offsets[i] % MIDX_CHUNK_ALIGNMENT)
+ BUG("chunk offset %"PRIu64" is not properly aligned",
+ chunk_offsets[i]);
+
+ hashwrite_be32(f, chunk_ids[i]);
+ hashwrite_be32(f, chunk_offsets[i] >> 32);
+ hashwrite_be32(f, chunk_offsets[i]);
+
+ written += MIDX_CHUNKLOOKUP_WIDTH;
+ }
+
+ for (i = 0; i < num_chunks; i++) {
+ if (written != chunk_offsets[i])
+ BUG("incorrect chunk offset (%"PRIu64" != %"PRIu64") for chunk id %"PRIx32,
+ chunk_offsets[i],
+ written,
+ chunk_ids[i]);
+
+ switch (chunk_ids[i]) {
+ case MIDX_CHUNKID_PACKNAMES:
+ written += write_midx_pack_names(f, packs.names, packs.nr);
+ break;
+
+ case MIDX_CHUNKID_OIDFANOUT:
+ written += write_midx_oid_fanout(f, entries, nr_entries);
+ break;
+
+ case MIDX_CHUNKID_OIDLOOKUP:
+ written += write_midx_oid_lookup(f, MIDX_HASH_LEN, entries, nr_entries);
+ break;
+
+ case MIDX_CHUNKID_OBJECTOFFSETS:
+ written += write_midx_object_offsets(f, large_offsets_needed, entries, nr_entries);
+ break;
+
+ case MIDX_CHUNKID_LARGEOFFSETS:
+ written += write_midx_large_offsets(f, num_large_offsets, entries, nr_entries);
+ break;
+
+ default:
+ BUG("trying to write unknown chunk id %"PRIx32,
+ chunk_ids[i]);
+ }
+ }
+
+ if (written != chunk_offsets[num_chunks])
+ BUG("incorrect final offset %"PRIu64" != %"PRIu64,
+ written,
+ chunk_offsets[num_chunks]);
+
+ finalize_hashfile(f, NULL, CSUM_FSYNC | CSUM_HASH_IN_STREAM);
+ commit_lock_file(&lk);
+
+cleanup:
+ for (i = 0; i < packs.nr; i++) {
+ if (packs.list[i]) {
+ close_pack(packs.list[i]);
+ free(packs.list[i]);
+ }
+ free(packs.names[i]);
+ }
+
+ free(packs.list);
+ free(packs.names);
+ free(entries);
+ free(pack_perm);
+ free(midx_name);
+ return 0;
+}
+
+void clear_midx_file(const char *object_dir)
+{
+ char *midx = get_midx_filename(object_dir);
+
+ if (remove_path(midx)) {
+ UNLEAK(midx);
+ die(_("failed to clear multi-pack-index at %s"), midx);
+ }
+
+ free(midx);
+}
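
Putting the constants at the top of midx.c together, the file produced by write_midx_file() is laid out roughly as follows (integers are network byte order, offsets relative to the start of the file):

	header (12 bytes): "MIDX" signature, file version (1), hash
	    version (1), number of chunks, one unused byte, and a
	    4-byte pack count
	chunk lookup: num_chunks + 1 rows of 4-byte chunk id plus
	    8-byte offset; the terminating row has id 0 and points
	    just past the last chunk
	chunks: PNAM (sorted, NUL-terminated pack names, padded to
	    4-byte alignment), OIDF (256 x 4-byte fanout), OIDL
	    (sorted object IDs), OOFF (4-byte pack id + 4-byte offset
	    per object), and an optional LOFF chunk of 8-byte offsets
	    for entries that do not fit in 31 bits
	trailer: checksum written by finalize_hashfile()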
diff --git a/midx.h b/midx.h
new file mode 100644
index 0000000000..a210f1af2a
--- /dev/null
+++ b/midx.h
@@ -0,0 +1,47 @@
+#ifndef __MIDX_H__
+#define __MIDX_H__
+
+#include "repository.h"
+
+struct multi_pack_index {
+ struct multi_pack_index *next;
+
+ int fd;
+
+ const unsigned char *data;
+ size_t data_len;
+
+ uint32_t signature;
+ unsigned char version;
+ unsigned char hash_len;
+ unsigned char num_chunks;
+ uint32_t num_packs;
+ uint32_t num_objects;
+
+ int local;
+
+ const unsigned char *chunk_pack_names;
+ const uint32_t *chunk_oid_fanout;
+ const unsigned char *chunk_oid_lookup;
+ const unsigned char *chunk_object_offsets;
+ const unsigned char *chunk_large_offsets;
+
+ const char **pack_names;
+ struct packed_git **packs;
+ char object_dir[FLEX_ARRAY];
+};
+
+struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local);
+int prepare_midx_pack(struct multi_pack_index *m, uint32_t pack_int_id);
+int bsearch_midx(const struct object_id *oid, struct multi_pack_index *m, uint32_t *result);
+struct object_id *nth_midxed_object_oid(struct object_id *oid,
+ struct multi_pack_index *m,
+ uint32_t n);
+int fill_midx_entry(const struct object_id *oid, struct pack_entry *e, struct multi_pack_index *m);
+int midx_contains_pack(struct multi_pack_index *m, const char *idx_name);
+int prepare_multi_pack_index_one(struct repository *r, const char *object_dir, int local);
+
+int write_midx_file(const char *object_dir);
+void clear_midx_file(const char *object_dir);
+
+#endif
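
A rough sketch of how the public API above hangs together (hypothetical helper; the real callers are packfile.c and builtin/multi-pack-index.c; struct pack_entry comes from cache.h):

#include "cache.h"
#include "object-store.h"
#include "midx.h"

static int find_in_midx(const char *object_dir, const struct object_id *oid)
{
	struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
	struct pack_entry e;

	if (!m)
		return 0;	/* no usable multi-pack-index on disk */
	if (!fill_midx_entry(oid, &e, m))
		return 0;	/* object not covered by this midx */

	/* e.p is the (lazily opened) packfile, e.offset the object offset */
	return 1;
}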
diff --git a/name-hash.c b/name-hash.c
index 163849831c..1fcda73cb3 100644
--- a/name-hash.c
+++ b/name-hash.c
@@ -578,10 +578,10 @@ static void threaded_lazy_init_name_hash(
static void lazy_init_name_hash(struct index_state *istate)
{
- uint64_t start = getnanotime();
if (istate->name_hash_initialized)
return;
+ trace_performance_enter();
hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr);
hashmap_init(&istate->dir_hash, dir_entry_cmp, NULL, istate->cache_nr);
@@ -602,7 +602,7 @@ static void lazy_init_name_hash(struct index_state *istate)
}
istate->name_hash_initialized = 1;
- trace_performance_since(start, "initialize name hash");
+ trace_performance_leave("initialize name hash");
}
/*
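
The name-hash.c hunk swaps the getnanotime()/trace_performance_since() pair for the newer enter/leave API from this cycle, which nests and therefore attributes time to whole regions. Sketch of the pattern (assuming trace.h):

#include "cache.h"
#include "trace.h"

static void expensive_init(void)
{
	trace_performance_enter();
	/* ... measured work; nested enter/leave pairs show up indented ... */
	trace_performance_leave("initialize something");
}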
diff --git a/notes-merge.c b/notes-merge.c
index 76ab19e702..5764e2b0ef 100644
--- a/notes-merge.c
+++ b/notes-merge.c
@@ -12,6 +12,7 @@
#include "notes-merge.h"
#include "strbuf.h"
#include "notes-utils.h"
+#include "commit-reach.h"
struct notes_merge_pair {
struct object_id obj, base, local, remote;
@@ -151,7 +152,7 @@ static struct notes_merge_pair *diff_tree_remote(struct notes_merge_options *o,
mp = find_notes_merge_pair_pos(changes, len, &obj, 1, &occupied);
if (occupied) {
/* We've found an addition/deletion pair */
- assert(!oidcmp(&mp->obj, &obj));
+ assert(oideq(&mp->obj, &obj));
if (is_null_oid(&p->one->oid)) { /* addition */
assert(is_null_oid(&mp->remote));
oidcpy(&mp->remote, &p->two->oid);
@@ -218,7 +219,7 @@ static void diff_tree_local(struct notes_merge_options *o,
continue;
}
- assert(!oidcmp(&mp->obj, &obj));
+ assert(oideq(&mp->obj, &obj));
if (is_null_oid(&p->two->oid)) { /* deletion */
/*
* Either this is a true deletion (1), or it is part
@@ -229,7 +230,7 @@ static void diff_tree_local(struct notes_merge_options *o,
* (3) mp->local is uninitialized; set it to null_sha1
* (will be overwritten by following addition)
*/
- if (!oidcmp(&mp->local, &uninitialized))
+ if (oideq(&mp->local, &uninitialized))
oidclr(&mp->local);
} else if (is_null_oid(&p->one->oid)) { /* addition */
/*
@@ -241,7 +242,7 @@ static void diff_tree_local(struct notes_merge_options *o,
* (3) mp->local is null_sha1; set to p->two->sha1
*/
assert(is_null_oid(&mp->local) ||
- !oidcmp(&mp->local, &uninitialized));
+ oideq(&mp->local, &uninitialized));
oidcpy(&mp->local, &p->two->oid);
} else { /* modification */
/*
@@ -249,8 +250,8 @@ static void diff_tree_local(struct notes_merge_options *o,
* match mp->base, and mp->local shall be uninitialized.
* Set mp->local to p->two->sha1.
*/
- assert(!oidcmp(&p->one->oid, &mp->base));
- assert(!oidcmp(&mp->local, &uninitialized));
+ assert(oideq(&p->one->oid, &mp->base));
+ assert(oideq(&mp->local, &uninitialized));
oidcpy(&mp->local, &p->two->oid);
}
trace_printf("\t\tStored local change for %s: %.7s -> %.7s\n",
@@ -480,14 +481,14 @@ static int merge_changes(struct notes_merge_options *o,
oid_to_hex(&p->local),
oid_to_hex(&p->remote));
- if (!oidcmp(&p->base, &p->remote)) {
+ if (oideq(&p->base, &p->remote)) {
/* no remote change; nothing to do */
trace_printf("\t\t\tskipping (no remote change)\n");
- } else if (!oidcmp(&p->local, &p->remote)) {
+ } else if (oideq(&p->local, &p->remote)) {
/* same change in local and remote; nothing to do */
trace_printf("\t\t\tskipping (local == remote)\n");
- } else if (!oidcmp(&p->local, &uninitialized) ||
- !oidcmp(&p->local, &p->base)) {
+ } else if (oideq(&p->local, &uninitialized) ||
+ oideq(&p->local, &p->base)) {
/* no local change; adopt remote change */
trace_printf("\t\t\tno local change, adopted remote\n");
if (add_note(t, &p->obj, &p->remote,
@@ -621,14 +622,14 @@ int notes_merge(struct notes_merge_options *o,
oid_to_hex(&local->object.oid),
oid_to_hex(base_oid));
- if (!oidcmp(&remote->object.oid, base_oid)) {
+ if (oideq(&remote->object.oid, base_oid)) {
/* Already merged; result == local commit */
if (o->verbosity >= 2)
printf("Already up to date!\n");
oidcpy(result_oid, &local->object.oid);
goto found_result;
}
- if (!oidcmp(&local->object.oid, base_oid)) {
+ if (oideq(&local->object.oid, base_oid)) {
/* Fast-forward; result == remote commit */
if (o->verbosity >= 2)
printf("Fast-forward\n");
diff --git a/notes.c b/notes.c
index 32d3dbcc1e..25cdce28b7 100644
--- a/notes.c
+++ b/notes.c
@@ -147,7 +147,7 @@ static struct leaf_node *note_tree_find(struct notes_tree *t,
void **p = note_tree_search(t, &tree, &n, key_sha1);
if (GET_PTR_TYPE(*p) == PTR_TYPE_NOTE) {
struct leaf_node *l = (struct leaf_node *) CLR_PTR_TYPE(*p);
- if (!hashcmp(key_sha1, l->key_oid.hash))
+ if (hasheq(key_sha1, l->key_oid.hash))
return l;
}
return NULL;
@@ -206,7 +206,7 @@ static void note_tree_remove(struct notes_tree *t,
if (GET_PTR_TYPE(*p) != PTR_TYPE_NOTE)
return; /* type mismatch, nothing to remove */
l = (struct leaf_node *) CLR_PTR_TYPE(*p);
- if (oidcmp(&l->key_oid, &entry->key_oid))
+ if (!oideq(&l->key_oid, &entry->key_oid))
return; /* key mismatch, nothing to remove */
/* we have found a matching entry */
@@ -266,9 +266,9 @@ static int note_tree_insert(struct notes_tree *t, struct int_node *tree,
case PTR_TYPE_NOTE:
switch (type) {
case PTR_TYPE_NOTE:
- if (!oidcmp(&l->key_oid, &entry->key_oid)) {
+ if (oideq(&l->key_oid, &entry->key_oid)) {
/* skip concatenation if l == entry */
- if (!oidcmp(&l->val_oid, &entry->val_oid))
+ if (oideq(&l->val_oid, &entry->val_oid))
return 0;
ret = combine_notes(&l->val_oid,
diff --git a/object-store.h b/object-store.h
index 67e66227d9..63b7605a3e 100644
--- a/object-store.h
+++ b/object-store.h
@@ -88,6 +88,8 @@ struct packed_git {
char pack_name[FLEX_ARRAY]; /* more */
};
+struct multi_pack_index;
+
struct raw_object_store {
/*
* Path to the repository's object store.
@@ -113,6 +115,13 @@ struct raw_object_store {
/*
* private data
*
+ * should only be accessed directly by packfile.c and midx.c
+ */
+ struct multi_pack_index *multi_pack_index;
+
+ /*
+ * private data
+ *
* should only be accessed directly by packfile.c
*/
@@ -121,6 +130,12 @@ struct raw_object_store {
struct list_head packed_git_mru;
/*
+ * A linked list containing all packfiles, starting with those
+ * contained in the multi_pack_index.
+ */
+ struct packed_git *all_packs;
+
+ /*
* A fast, rough count of the number of objects in the repository.
* These two fields are not meant for direct access. Use
* approximate_object_count() instead.
diff --git a/object.c b/object.c
index 51c4594515..e54160550c 100644
--- a/object.c
+++ b/object.c
@@ -95,7 +95,7 @@ struct object *lookup_object(struct repository *r, const unsigned char *sha1)
first = i = hash_obj(sha1, r->parsed_objects->obj_hash_size);
while ((obj = r->parsed_objects->obj_hash[i]) != NULL) {
- if (!hashcmp(sha1, obj->oid.hash))
+ if (hasheq(sha1, obj->oid.hash))
break;
i++;
if (i == r->parsed_objects->obj_hash_size)
diff --git a/object.h b/object.h
index 6e28fdd0b4..0feb90ae61 100644
--- a/object.h
+++ b/object.h
@@ -63,12 +63,12 @@ struct object_array {
* fetch-pack.c: 01
* negotiator/default.c: 2--5
* walker.c: 0-2
- * upload-pack.c: 4 11----------------19
+ * upload-pack.c: 4 11-----14 16-----19
* builtin/blame.c: 12-13
* bisect.c: 16
* bundle.c: 16
* http-push.c: 16-----19
- * commit.c: 16-----19
+ * commit-reach.c: 15-------19
* sha1-name.c: 20
* list-objects-filter.c: 21
* builtin/fsck.c: 0--3
diff --git a/oidmap.c b/oidmap.c
index d9fb19ba6a..b0841a0f58 100644
--- a/oidmap.c
+++ b/oidmap.c
@@ -1,14 +1,14 @@
#include "cache.h"
#include "oidmap.h"
-static int cmpfn(const void *hashmap_cmp_fn_data,
- const void *entry, const void *entry_or_key,
- const void *keydata)
+static int oidmap_neq(const void *hashmap_cmp_fn_data,
+ const void *entry, const void *entry_or_key,
+ const void *keydata)
{
const struct oidmap_entry *entry_ = entry;
if (keydata)
- return oidcmp(&entry_->oid, (const struct object_id *) keydata);
- return oidcmp(&entry_->oid,
+ return !oideq(&entry_->oid, (const struct object_id *) keydata);
+ return !oideq(&entry_->oid,
&((const struct oidmap_entry *) entry_or_key)->oid);
}
@@ -21,7 +21,7 @@ static int hash(const struct object_id *oid)
void oidmap_init(struct oidmap *map, size_t initial_size)
{
- hashmap_init(&map->map, cmpfn, NULL, initial_size);
+ hashmap_init(&map->map, oidmap_neq, NULL, initial_size);
}
void oidmap_free(struct oidmap *map, int free_entries)
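
The rename of cmpfn to oidmap_neq is about readability only: hashmap comparison callbacks follow the memcmp convention and return 0 for "equal", so the callback really answers "not equal", which !oideq() spells out. The oidmap API itself is unchanged; a minimal usage sketch:

#include "cache.h"
#include "oidmap.h"

struct flag_entry {
	struct oidmap_entry entry;	/* must come first; key is entry.oid */
	int flag;
};

static int has_flag(struct oidmap *map, const struct object_id *oid)
{
	struct flag_entry *e = oidmap_get(map, oid);
	return e && e->flag;
}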
diff --git a/pack-bitmap-write.c b/pack-bitmap-write.c
index d977e9bacb..fc82f37a02 100644
--- a/pack-bitmap-write.c
+++ b/pack-bitmap-write.c
@@ -11,6 +11,7 @@
#include "pack-bitmap.h"
#include "sha1-lookup.h"
#include "pack-objects.h"
+#include "commit-reach.h"
struct bitmapped_commit {
struct commit *commit;
diff --git a/pack-bitmap.c b/pack-bitmap.c
index f0a1937a1c..5848cc93aa 100644
--- a/pack-bitmap.c
+++ b/pack-bitmap.c
@@ -86,10 +86,11 @@ struct bitmap_index {
/* Bitmap result of the last performed walk */
struct bitmap *result;
+ /* "have" bitmap from the last performed walk */
+ struct bitmap *haves;
+
/* Version of the bitmap index */
unsigned int version;
-
- unsigned loaded : 1;
};
static struct ewah_bitmap *lookup_stored_bitmap(struct stored_bitmap *st)
@@ -303,7 +304,7 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git
static int load_pack_bitmap(struct bitmap_index *bitmap_git)
{
- assert(bitmap_git->map && !bitmap_git->loaded);
+ assert(bitmap_git->map);
bitmap_git->bitmaps = kh_init_sha1();
bitmap_git->ext_index.positions = kh_init_sha1_pos();
@@ -318,7 +319,6 @@ static int load_pack_bitmap(struct bitmap_index *bitmap_git)
if (load_bitmap_entries_v1(bitmap_git) < 0)
goto failed;
- bitmap_git->loaded = 1;
return 0;
failed:
@@ -333,9 +333,9 @@ static int open_pack_bitmap(struct bitmap_index *bitmap_git)
struct packed_git *p;
int ret = -1;
- assert(!bitmap_git->map && !bitmap_git->loaded);
+ assert(!bitmap_git->map);
- for (p = get_packed_git(the_repository); p; p = p->next) {
+ for (p = get_all_packs(the_repository); p; p = p->next) {
if (open_pack_bitmap_1(bitmap_git, p) == 0)
ret = 0;
}
@@ -735,7 +735,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs)
* from disk. this is the point of no return; after this the rev_list
* becomes invalidated and we must perform the revwalk through bitmaps
*/
- if (!bitmap_git->loaded && load_pack_bitmap(bitmap_git) < 0)
+ if (load_pack_bitmap(bitmap_git) < 0)
goto cleanup;
object_array_clear(&revs->pending);
@@ -759,8 +759,8 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs)
bitmap_and_not(wants_bitmap, haves_bitmap);
bitmap_git->result = wants_bitmap;
+ bitmap_git->haves = haves_bitmap;
- bitmap_free(haves_bitmap);
return bitmap_git;
cleanup:
@@ -845,9 +845,6 @@ void traverse_bitmap_commit_list(struct bitmap_index *bitmap_git,
OBJ_TAG, show_reachable);
show_extended_objects(bitmap_git, show_reachable);
-
- bitmap_free(bitmap_git->result);
- bitmap_git->result = NULL;
}
static uint32_t count_object_type(struct bitmap_index *bitmap_git,
@@ -1114,5 +1111,23 @@ void free_bitmap_index(struct bitmap_index *b)
free(b->ext_index.objects);
free(b->ext_index.hashes);
bitmap_free(b->result);
+ bitmap_free(b->haves);
free(b);
}
+
+int bitmap_has_sha1_in_uninteresting(struct bitmap_index *bitmap_git,
+ const unsigned char *sha1)
+{
+ int pos;
+
+ if (!bitmap_git)
+ return 0; /* no bitmap loaded */
+ if (!bitmap_git->haves)
+ return 0; /* walk had no "haves" */
+
+ pos = bitmap_position_packfile(bitmap_git, sha1);
+ if (pos < 0)
+ return 0;
+
+ return bitmap_get(bitmap_git->haves, pos);
+}
diff --git a/pack-bitmap.h b/pack-bitmap.h
index 8a04741e12..189dd68ad3 100644
--- a/pack-bitmap.h
+++ b/pack-bitmap.h
@@ -53,6 +53,13 @@ int rebuild_existing_bitmaps(struct bitmap_index *, struct packing_data *mapping
khash_sha1 *reused_bitmaps, int show_progress);
void free_bitmap_index(struct bitmap_index *);
+/*
+ * After a traversal has been performed by prepare_bitmap_walk(), this can be
+ * queried to see if a particular object was reachable from any of the
+ * objects flagged as UNINTERESTING.
+ */
+int bitmap_has_sha1_in_uninteresting(struct bitmap_index *, const unsigned char *sha1);
+
void bitmap_writer_show_progress(int show);
void bitmap_writer_set_checksum(unsigned char *sha1);
void bitmap_writer_build_type_index(struct packing_data *to_pack,
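
bitmap_has_sha1_in_uninteresting() is only meaningful after prepare_bitmap_walk() has succeeded, since that is when the new "haves" bitmap is retained. A hedged sketch of the kind of check a packing caller would make (pack-objects uses this to keep on-disk deltas whose base the receiver already has):

#include "cache.h"
#include "pack-bitmap.h"

/* Hypothetical: can we assume the receiving side already has "base"? */
static int other_side_has(struct bitmap_index *bitmap_git,
			  const struct object_id *base)
{
	/* Returns 0 if no bitmap was loaded or the walk had no haves. */
	return bitmap_has_sha1_in_uninteresting(bitmap_git, base->hash);
}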
diff --git a/pack-check.c b/pack-check.c
index d3a57df34f..fa5f0ff8fa 100644
--- a/pack-check.c
+++ b/pack-check.c
@@ -79,10 +79,10 @@ static int verify_packfile(struct packed_git *p,
} while (offset < pack_sig_ofs);
the_hash_algo->final_fn(hash, &ctx);
pack_sig = use_pack(p, w_curs, pack_sig_ofs, NULL);
- if (hashcmp(hash, pack_sig))
+ if (!hasheq(hash, pack_sig))
err = error("%s pack checksum mismatch",
p->pack_name);
- if (hashcmp(index_base + index_size - the_hash_algo->hexsz, pack_sig))
+ if (!hasheq(index_base + index_size - the_hash_algo->hexsz, pack_sig))
err = error("%s pack checksum does not match its index",
p->pack_name);
unuse_pack(w_curs);
@@ -180,7 +180,7 @@ int verify_pack_index(struct packed_git *p)
the_hash_algo->init_fn(&ctx);
the_hash_algo->update_fn(&ctx, index_base, (unsigned int)(index_size - the_hash_algo->rawsz));
the_hash_algo->final_fn(hash, &ctx);
- if (hashcmp(hash, index_base + index_size - the_hash_algo->rawsz))
+ if (!hasheq(hash, index_base + index_size - the_hash_algo->rawsz))
err = error("Packfile index for %s hash mismatch",
p->pack_name);
return err;
diff --git a/pack-objects.c b/pack-objects.c
index 6ef87e5683..7e624c30eb 100644
--- a/pack-objects.c
+++ b/pack-objects.c
@@ -16,7 +16,7 @@ static uint32_t locate_object_entry_hash(struct packing_data *pdata,
while (pdata->index[i] > 0) {
uint32_t pos = pdata->index[i] - 1;
- if (!hashcmp(sha1, pdata->objects[pos].idx.oid.hash)) {
+ if (hasheq(sha1, pdata->objects[pos].idx.oid.hash)) {
*found = 1;
return i;
}
@@ -99,7 +99,7 @@ static void prepare_in_pack_by_idx(struct packing_data *pdata)
* (i.e. in_pack_idx also zero) should return NULL.
*/
mapping[cnt++] = NULL;
- for (p = get_packed_git(the_repository); p; p = p->next, cnt++) {
+ for (p = get_all_packs(the_repository); p; p = p->next, cnt++) {
if (cnt == nr) {
free(mapping);
return;
@@ -164,6 +164,12 @@ struct object_entry *packlist_alloc(struct packing_data *pdata,
REALLOC_ARRAY(pdata->in_pack, pdata->nr_alloc);
if (pdata->delta_size)
REALLOC_ARRAY(pdata->delta_size, pdata->nr_alloc);
+
+ if (pdata->tree_depth)
+ REALLOC_ARRAY(pdata->tree_depth, pdata->nr_alloc);
+
+ if (pdata->layer)
+ REALLOC_ARRAY(pdata->layer, pdata->nr_alloc);
}
new_entry = pdata->objects + pdata->nr_objects++;
@@ -179,5 +185,30 @@ struct object_entry *packlist_alloc(struct packing_data *pdata,
if (pdata->in_pack)
pdata->in_pack[pdata->nr_objects - 1] = NULL;
+ if (pdata->tree_depth)
+ pdata->tree_depth[pdata->nr_objects - 1] = 0;
+
+ if (pdata->layer)
+ pdata->layer[pdata->nr_objects - 1] = 0;
+
return new_entry;
}
+
+void oe_set_delta_ext(struct packing_data *pdata,
+ struct object_entry *delta,
+ const unsigned char *sha1)
+{
+ struct object_entry *base;
+
+ ALLOC_GROW(pdata->ext_bases, pdata->nr_ext + 1, pdata->alloc_ext);
+ base = &pdata->ext_bases[pdata->nr_ext++];
+ memset(base, 0, sizeof(*base));
+ hashcpy(base->idx.oid.hash, sha1);
+
+ /* These flags mark that we are not part of the actual pack output. */
+ base->preferred_base = 1;
+ base->filled = 1;
+
+ delta->ext_base = 1;
+ delta->delta_idx = base - pdata->ext_bases + 1;
+}
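
oe_set_delta_ext() records a delta whose base object is not in the packlist at all: the stand-in base is flagged preferred_base and filled so the writing code never emits it, and the entry's delta_idx then indexes pdata->ext_bases rather than pdata->objects (told apart by the new ext_base bit and resolved by the updated oe_delta() below). A hedged sketch of the intended caller, modeled on the thin-pack reuse case this series is building towards:

#include "pack-objects.h"

/*
 * Hypothetical: an on-disk delta was found whose base the receiver is
 * believed to have (e.g. via bitmap_has_sha1_in_uninteresting()), so
 * keep the delta and attach it to an external base entry.
 */
static void reuse_delta_against_have(struct packing_data *pdata,
				     struct object_entry *entry,
				     const unsigned char *base_sha1)
{
	oe_set_delta_ext(pdata, entry, base_sha1);
	/* oe_delta(pdata, entry) now resolves into pdata->ext_bases[] */
}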
diff --git a/pack-objects.h b/pack-objects.h
index 62806ccc39..2ca39cfcfe 100644
--- a/pack-objects.h
+++ b/pack-objects.h
@@ -103,6 +103,7 @@ struct object_entry {
unsigned no_try_delta:1;
unsigned type_:TYPE_BITS;
unsigned in_pack_type:TYPE_BITS; /* could be delta */
+
unsigned preferred_base:1; /*
* we do not pack this, but is available
* to be used as the base object to delta
@@ -112,6 +113,7 @@ struct object_entry {
unsigned filled:1; /* assigned write-order */
unsigned dfs_state:OE_DFS_STATE_BITS;
unsigned depth:OE_DEPTH_BITS;
+ unsigned ext_base:1; /* delta_idx points outside packlist */
/*
* pahole results on 64-bit linux (gcc and clang)
@@ -147,8 +149,20 @@ struct packing_data {
pthread_mutex_t lock;
#endif
+ /*
+ * This list contains entries for bases which we know the other side
+ * has (e.g., via reachability bitmaps), but which aren't in our
+ * "objects" list.
+ */
+ struct object_entry *ext_bases;
+ uint32_t nr_ext, alloc_ext;
+
uintmax_t oe_size_limit;
uintmax_t oe_delta_size_limit;
+
+ /* delta islands */
+ unsigned int *tree_depth;
+ unsigned char *layer;
};
void prepare_packing_data(struct packing_data *pdata);
@@ -249,9 +263,12 @@ static inline struct object_entry *oe_delta(
const struct packing_data *pack,
const struct object_entry *e)
{
- if (e->delta_idx)
+ if (!e->delta_idx)
+ return NULL;
+ if (e->ext_base)
+ return &pack->ext_bases[e->delta_idx - 1];
+ else
return &pack->objects[e->delta_idx - 1];
- return NULL;
}
static inline void oe_set_delta(struct packing_data *pack,
@@ -264,6 +281,10 @@ static inline void oe_set_delta(struct packing_data *pack,
e->delta_idx = 0;
}
+void oe_set_delta_ext(struct packing_data *pack,
+ struct object_entry *e,
+ const unsigned char *sha1);
+
static inline struct object_entry *oe_delta_child(
const struct packing_data *pack,
const struct object_entry *e)
@@ -384,4 +405,38 @@ static inline void oe_set_delta_size(struct packing_data *pack,
}
}
+static inline unsigned int oe_tree_depth(struct packing_data *pack,
+ struct object_entry *e)
+{
+ if (!pack->tree_depth)
+ return 0;
+ return pack->tree_depth[e - pack->objects];
+}
+
+static inline void oe_set_tree_depth(struct packing_data *pack,
+ struct object_entry *e,
+ unsigned int tree_depth)
+{
+ if (!pack->tree_depth)
+ ALLOC_ARRAY(pack->tree_depth, pack->nr_objects);
+ pack->tree_depth[e - pack->objects] = tree_depth;
+}
+
+static inline unsigned char oe_layer(struct packing_data *pack,
+ struct object_entry *e)
+{
+ if (!pack->layer)
+ return 0;
+ return pack->layer[e - pack->objects];
+}
+
+static inline void oe_set_layer(struct packing_data *pack,
+ struct object_entry *e,
+ unsigned char layer)
+{
+ if (!pack->layer)
+ ALLOC_ARRAY(pack->layer, pack->nr_objects);
+ pack->layer[e - pack->objects] = layer;
+}
+
#endif
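
tree_depth and layer are kept in arrays parallel to pdata->objects rather than as new fields in struct object_entry, so repacks that never use delta islands pay no extra per-object memory; the accessors hide the lazy allocation and index by e - pack->objects. A small sketch of the access pattern, assuming the helpers above:

#include "pack-objects.h"

/* Hypothetical helper exercising the lazily-allocated side arrays. */
static void record_island_info(struct packing_data *pdata,
			       struct object_entry *e,
			       unsigned int depth, unsigned char layer)
{
	/* The first call allocates pdata->tree_depth for every object. */
	oe_set_tree_depth(pdata, e, depth);
	/* Likewise for pdata->layer; both share the e - objects index. */
	oe_set_layer(pdata, e, layer);
}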
diff --git a/pack-write.c b/pack-write.c
index a9d46bc03f..29d17a9bec 100644
--- a/pack-write.c
+++ b/pack-write.c
@@ -124,7 +124,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
}
hashwrite(f, obj->oid.hash, the_hash_algo->rawsz);
if ((opts->flags & WRITE_IDX_STRICT) &&
- (i && !oidcmp(&list[-2]->oid, &obj->oid)))
+ (i && oideq(&list[-2]->oid, &obj->oid)))
die("The same object %s appears twice in the pack",
oid_to_hex(&obj->oid));
}
@@ -260,7 +260,7 @@ void fixup_pack_header_footer(int pack_fd,
if (partial_pack_offset == 0) {
unsigned char hash[GIT_MAX_RAWSZ];
the_hash_algo->final_fn(hash, &old_hash_ctx);
- if (hashcmp(hash, partial_pack_hash) != 0)
+ if (!hasheq(hash, partial_pack_hash))
die("Unexpected checksum for %s "
"(disk corruption?)", pack_name);
/*
diff --git a/packfile.c b/packfile.c
index ebcb5742ec..841b36182f 100644
--- a/packfile.c
+++ b/packfile.c
@@ -15,6 +15,7 @@
#include "tree-walk.h"
#include "tree.h"
#include "object-store.h"
+#include "midx.h"
char *odb_pack_name(struct strbuf *buf,
const unsigned char *sha1,
@@ -196,6 +197,23 @@ int open_pack_index(struct packed_git *p)
return ret;
}
+uint32_t get_pack_fanout(struct packed_git *p, uint32_t value)
+{
+ const uint32_t *level1_ofs = p->index_data;
+
+ if (!level1_ofs) {
+ if (open_pack_index(p))
+ return 0;
+ level1_ofs = p->index_data;
+ }
+
+ if (p->index_version > 1) {
+ level1_ofs += 2;
+ }
+
+ return ntohl(level1_ofs[value]);
+}
+
static struct packed_git *alloc_packed_git(int extra)
{
struct packed_git *p = xmalloc(st_add(sizeof(*p), extra));
@@ -451,8 +469,19 @@ static int open_packed_git_1(struct packed_git *p)
ssize_t read_result;
const unsigned hashsz = the_hash_algo->rawsz;
- if (!p->index_data && open_pack_index(p))
- return error("packfile %s index unavailable", p->pack_name);
+ if (!p->index_data) {
+ struct multi_pack_index *m;
+ const char *pack_name = strrchr(p->pack_name, '/');
+
+ for (m = the_repository->objects->multi_pack_index;
+ m; m = m->next) {
+ if (midx_contains_pack(m, pack_name))
+ break;
+ }
+
+ if (!m && open_pack_index(p))
+ return error("packfile %s index unavailable", p->pack_name);
+ }
if (!pack_max_fds) {
unsigned int max_fds = get_max_fd_limit();
@@ -503,6 +532,10 @@ static int open_packed_git_1(struct packed_git *p)
" supported (try upgrading GIT to a newer version)",
p->pack_name, ntohl(hdr.hdr_version));
+ /* Skip index checking if in multi-pack-index */
+ if (!p->index_data)
+ return 0;
+
/* Verify the pack matches its index. */
if (p->num_objects != ntohl(hdr.hdr_entries))
return error("packfile %s claims to have %"PRIu32" objects"
@@ -517,7 +550,7 @@ static int open_packed_git_1(struct packed_git *p)
if (read_result != hashsz)
return error("packfile %s signature is unavailable", p->pack_name);
idx_hash = ((unsigned char *)p->index_data) + p->index_size - hashsz * 2;
- if (hashcmp(hash, idx_hash))
+ if (!hasheq(hash, idx_hash))
return error("packfile %s does not match index", p->pack_name);
return 0;
}
@@ -738,13 +771,14 @@ static void report_pack_garbage(struct string_list *list)
report_helper(list, seen_bits, first, list->nr);
}
-static void prepare_packed_git_one(struct repository *r, char *objdir, int local)
+void for_each_file_in_pack_dir(const char *objdir,
+ each_file_in_pack_dir_fn fn,
+ void *data)
{
struct strbuf path = STRBUF_INIT;
size_t dirnamelen;
DIR *dir;
struct dirent *de;
- struct string_list garbage = STRING_LIST_INIT_DUP;
strbuf_addstr(&path, objdir);
strbuf_addstr(&path, "/pack");
@@ -759,53 +793,87 @@ static void prepare_packed_git_one(struct repository *r, char *objdir, int local
strbuf_addch(&path, '/');
dirnamelen = path.len;
while ((de = readdir(dir)) != NULL) {
- struct packed_git *p;
- size_t base_len;
-
if (is_dot_or_dotdot(de->d_name))
continue;
strbuf_setlen(&path, dirnamelen);
strbuf_addstr(&path, de->d_name);
- base_len = path.len;
- if (strip_suffix_mem(path.buf, &base_len, ".idx")) {
- /* Don't reopen a pack we already have. */
- for (p = r->objects->packed_git; p;
- p = p->next) {
- size_t len;
- if (strip_suffix(p->pack_name, ".pack", &len) &&
- len == base_len &&
- !memcmp(p->pack_name, path.buf, len))
- break;
- }
- if (p == NULL &&
- /*
- * See if it really is a valid .idx file with
- * corresponding .pack file that we can map.
- */
- (p = add_packed_git(path.buf, path.len, local)) != NULL)
- install_packed_git(r, p);
- }
-
- if (!report_garbage)
- continue;
-
- if (ends_with(de->d_name, ".idx") ||
- ends_with(de->d_name, ".pack") ||
- ends_with(de->d_name, ".bitmap") ||
- ends_with(de->d_name, ".keep") ||
- ends_with(de->d_name, ".promisor"))
- string_list_append(&garbage, path.buf);
- else
- report_garbage(PACKDIR_FILE_GARBAGE, path.buf);
+ fn(path.buf, path.len, de->d_name, data);
}
+
closedir(dir);
- report_pack_garbage(&garbage);
- string_list_clear(&garbage, 0);
strbuf_release(&path);
}
+struct prepare_pack_data {
+ struct repository *r;
+ struct string_list *garbage;
+ int local;
+ struct multi_pack_index *m;
+};
+
+static void prepare_pack(const char *full_name, size_t full_name_len,
+ const char *file_name, void *_data)
+{
+ struct prepare_pack_data *data = (struct prepare_pack_data *)_data;
+ struct packed_git *p;
+ size_t base_len = full_name_len;
+
+ if (strip_suffix_mem(full_name, &base_len, ".idx") &&
+ !(data->m && midx_contains_pack(data->m, file_name))) {
+ /* Don't reopen a pack we already have. */
+ for (p = data->r->objects->packed_git; p; p = p->next) {
+ size_t len;
+ if (strip_suffix(p->pack_name, ".pack", &len) &&
+ len == base_len &&
+ !memcmp(p->pack_name, full_name, len))
+ break;
+ }
+
+ if (!p) {
+ p = add_packed_git(full_name, full_name_len, data->local);
+ if (p)
+ install_packed_git(data->r, p);
+ }
+ }
+
+ if (!report_garbage)
+ return;
+
+ if (!strcmp(file_name, "multi-pack-index"))
+ return;
+ if (ends_with(file_name, ".idx") ||
+ ends_with(file_name, ".pack") ||
+ ends_with(file_name, ".bitmap") ||
+ ends_with(file_name, ".keep") ||
+ ends_with(file_name, ".promisor"))
+ string_list_append(data->garbage, full_name);
+ else
+ report_garbage(PACKDIR_FILE_GARBAGE, full_name);
+}
+
+static void prepare_packed_git_one(struct repository *r, char *objdir, int local)
+{
+ struct prepare_pack_data data;
+ struct string_list garbage = STRING_LIST_INIT_DUP;
+
+ data.m = r->objects->multi_pack_index;
+
+ /* look for the multi-pack-index for this object directory */
+ while (data.m && strcmp(data.m->object_dir, objdir))
+ data.m = data.m->next;
+
+ data.r = r;
+ data.garbage = &garbage;
+ data.local = local;
+
+ for_each_file_in_pack_dir(objdir, prepare_pack, &data);
+
+ report_pack_garbage(data.garbage);
+ string_list_clear(data.garbage, 0);
+}
+
static void prepare_packed_git(struct repository *r);
/*
* Give a fast, rough count of the number of objects in the repository. This
@@ -818,10 +886,13 @@ unsigned long approximate_object_count(void)
{
if (!the_repository->objects->approximate_object_count_valid) {
unsigned long count;
+ struct multi_pack_index *m;
struct packed_git *p;
prepare_packed_git(the_repository);
count = 0;
+ for (m = get_multi_pack_index(the_repository); m; m = m->next)
+ count += m->num_objects;
for (p = the_repository->objects->packed_git; p; p = p->next) {
if (open_pack_index(p))
continue;
@@ -893,11 +964,17 @@ static void prepare_packed_git(struct repository *r)
if (r->objects->packed_git_initialized)
return;
+ prepare_multi_pack_index_one(r, r->objects->objectdir, 1);
prepare_packed_git_one(r, r->objects->objectdir, 1);
prepare_alt_odb(r);
- for (alt = r->objects->alt_odb_list; alt; alt = alt->next)
+ for (alt = r->objects->alt_odb_list; alt; alt = alt->next) {
+ prepare_multi_pack_index_one(r, alt->path, 0);
prepare_packed_git_one(r, alt->path, 0);
+ }
rearrange_packed_git(r);
+
+ r->objects->all_packs = NULL;
+
prepare_packed_git_mru(r);
r->objects->packed_git_initialized = 1;
}
@@ -915,6 +992,36 @@ struct packed_git *get_packed_git(struct repository *r)
return r->objects->packed_git;
}
+struct multi_pack_index *get_multi_pack_index(struct repository *r)
+{
+ prepare_packed_git(r);
+ return r->objects->multi_pack_index;
+}
+
+struct packed_git *get_all_packs(struct repository *r)
+{
+ prepare_packed_git(r);
+
+ if (!r->objects->all_packs) {
+ struct packed_git *p = r->objects->packed_git;
+ struct multi_pack_index *m;
+
+ for (m = r->objects->multi_pack_index; m; m = m->next) {
+ uint32_t i;
+ for (i = 0; i < m->num_packs; i++) {
+ if (!prepare_midx_pack(m, i)) {
+ m->packs[i]->next = p;
+ p = m->packs[i];
+ }
+ }
+ }
+
+ r->objects->all_packs = p;
+ }
+
+ return r->objects->all_packs;
+}
+
struct list_head *get_packed_git_mru(struct repository *r)
{
prepare_packed_git(r);
@@ -1015,7 +1122,7 @@ void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1)
{
unsigned i;
for (i = 0; i < p->num_bad_objects; i++)
- if (!hashcmp(sha1, p->bad_object_sha1 + GIT_SHA1_RAWSZ * i))
+ if (hasheq(sha1, p->bad_object_sha1 + GIT_SHA1_RAWSZ * i))
return;
p->bad_object_sha1 = xrealloc(p->bad_object_sha1,
st_mult(GIT_MAX_RAWSZ,
@@ -1031,8 +1138,8 @@ const struct packed_git *has_packed_and_bad(const unsigned char *sha1)
for (p = the_repository->objects->packed_git; p; p = p->next)
for (i = 0; i < p->num_bad_objects; i++)
- if (!hashcmp(sha1,
- p->bad_object_sha1 + the_hash_algo->rawsz * i))
+ if (hasheq(sha1,
+ p->bad_object_sha1 + the_hash_algo->rawsz * i))
return p;
return NULL;
}
@@ -1830,8 +1937,8 @@ static int fill_pack_entry(const struct object_id *oid,
if (p->num_bad_objects) {
unsigned i;
for (i = 0; i < p->num_bad_objects; i++)
- if (!hashcmp(oid->hash,
- p->bad_object_sha1 + the_hash_algo->rawsz * i))
+ if (hasheq(oid->hash,
+ p->bad_object_sha1 + the_hash_algo->rawsz * i))
return 0;
}
@@ -1856,11 +1963,17 @@ static int fill_pack_entry(const struct object_id *oid,
int find_pack_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e)
{
struct list_head *pos;
+ struct multi_pack_index *m;
prepare_packed_git(r);
- if (!r->objects->packed_git)
+ if (!r->objects->packed_git && !r->objects->multi_pack_index)
return 0;
+ for (m = r->objects->multi_pack_index; m; m = m->next) {
+ if (fill_midx_entry(oid, e, m))
+ return 1;
+ }
+
list_for_each(pos, &r->objects->packed_git_mru) {
struct packed_git *p = list_entry(pos, struct packed_git, mru);
if (fill_pack_entry(oid, e, p)) {
@@ -1923,7 +2036,7 @@ int for_each_packed_object(each_packed_object_fn cb, void *data,
int pack_errors = 0;
prepare_packed_git(the_repository);
- for (p = the_repository->objects->packed_git; p; p = p->next) {
+ for (p = get_all_packs(the_repository); p; p = p->next) {
if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local)
continue;
if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) &&
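
The new get_all_packs() above chains the packs referenced by each multi-pack-index in front of the regular packed_git list, and for_each_packed_object() now walks that combined list. A minimal sketch of a hypothetical caller (not part of this patch) that wants to see every pack, midx-covered or not:

    /* assumes the usual git-internal headers (cache.h, packfile.h, trace.h) */
    static void count_local_packs(struct repository *r)
    {
        struct packed_git *p;
        int local = 0, total = 0;

        for (p = get_all_packs(r); p; p = p->next) {
            total++;
            if (p->pack_local)
                local++;
        }
        trace_printf("%d of %d packs are local\n", local, total);
    }
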
diff --git a/packfile.h b/packfile.h
index 630f35cf31..442625723d 100644
--- a/packfile.h
+++ b/packfile.h
@@ -33,6 +33,12 @@ extern char *sha1_pack_index_name(const unsigned char *sha1);
extern struct packed_git *parse_pack_index(unsigned char *sha1, const char *idx_path);
+typedef void each_file_in_pack_dir_fn(const char *full_path, size_t full_path_len,
+ const char *file_path, void *data);
+void for_each_file_in_pack_dir(const char *objdir,
+ each_file_in_pack_dir_fn fn,
+ void *data);
+
/* A hook to report invalid files in pack directory */
#define PACKDIR_FILE_PACK 1
#define PACKDIR_FILE_IDX 2
@@ -44,6 +50,8 @@ extern void install_packed_git(struct repository *r, struct packed_git *pack);
struct packed_git *get_packed_git(struct repository *r);
struct list_head *get_packed_git_mru(struct repository *r);
+struct multi_pack_index *get_multi_pack_index(struct repository *r);
+struct packed_git *get_all_packs(struct repository *r);
/*
* Give a rough count of objects in the repository. This sacrifices accuracy
@@ -68,6 +76,8 @@ extern int open_pack_index(struct packed_git *);
*/
extern void close_pack_index(struct packed_git *);
+extern uint32_t get_pack_fanout(struct packed_git *p, uint32_t value);
+
extern unsigned char *use_pack(struct packed_git *, struct pack_window **, off_t, unsigned long *);
extern void close_pack_windows(struct packed_git *);
extern void close_pack(struct packed_git *);
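
for_each_file_in_pack_dir() and its callback type, declared above, let other callers reuse the pack-directory scan that prepare_packed_git_one() used to hard-code. A sketch of a hypothetical callback that merely collects *.keep files; the struct and function names are illustrative:

    struct keep_data {
        struct string_list *keeps;
    };

    static void collect_keep(const char *full_path, size_t full_path_len,
                             const char *file_name, void *cb_data)
    {
        struct keep_data *data = cb_data;

        if (ends_with(file_name, ".keep"))
            string_list_append(data->keeps, full_path);
    }

    /* ... later: for_each_file_in_pack_dir(objdir, collect_keep, &data); */
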
diff --git a/patch-delta.c b/patch-delta.c
index 56e0a5ede2..b5c8594db6 100644
--- a/patch-delta.c
+++ b/patch-delta.c
@@ -40,24 +40,31 @@ void *patch_delta(const void *src_buf, unsigned long src_size,
cmd = *data++;
if (cmd & 0x80) {
unsigned long cp_off = 0, cp_size = 0;
- if (cmd & 0x01) cp_off = *data++;
- if (cmd & 0x02) cp_off |= (*data++ << 8);
- if (cmd & 0x04) cp_off |= (*data++ << 16);
- if (cmd & 0x08) cp_off |= ((unsigned) *data++ << 24);
- if (cmd & 0x10) cp_size = *data++;
- if (cmd & 0x20) cp_size |= (*data++ << 8);
- if (cmd & 0x40) cp_size |= (*data++ << 16);
+#define PARSE_CP_PARAM(bit, var, shift) do { \
+ if (cmd & (bit)) { \
+ if (data >= top) \
+ goto bad_length; \
+ var |= ((unsigned) *data++ << (shift)); \
+ } } while (0)
+ PARSE_CP_PARAM(0x01, cp_off, 0);
+ PARSE_CP_PARAM(0x02, cp_off, 8);
+ PARSE_CP_PARAM(0x04, cp_off, 16);
+ PARSE_CP_PARAM(0x08, cp_off, 24);
+ PARSE_CP_PARAM(0x10, cp_size, 0);
+ PARSE_CP_PARAM(0x20, cp_size, 8);
+ PARSE_CP_PARAM(0x40, cp_size, 16);
+#undef PARSE_CP_PARAM
if (cp_size == 0) cp_size = 0x10000;
if (unsigned_add_overflows(cp_off, cp_size) ||
cp_off + cp_size > src_size ||
cp_size > size)
- break;
+ goto bad_length;
memcpy(out, (char *) src_buf + cp_off, cp_size);
out += cp_size;
size -= cp_size;
} else if (cmd) {
- if (cmd > size)
- break;
+ if (cmd > size || cmd > top - data)
+ goto bad_length;
memcpy(out, data, cmd);
out += cmd;
data += cmd;
@@ -75,6 +82,7 @@ void *patch_delta(const void *src_buf, unsigned long src_size,
/* sanity check */
if (data != top || size != 0) {
+ bad_length:
error("delta replay has gone wild");
bad:
free(dst_buf);
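
The PARSE_CP_PARAM macro above reads one optional byte per set bit of a copy command and now refuses to read past 'top'. For reference, a standalone sketch of the same copy-parameter encoding (bits 0x01-0x08 select the offset bytes, 0x10-0x40 the size bytes, and a size of zero means 0x10000); the helper itself is illustrative only:

    #include <stddef.h>

    static int parse_copy_params(const unsigned char *data, const unsigned char *top,
                                 unsigned char cmd,
                                 unsigned long *cp_off, unsigned long *cp_size,
                                 size_t *consumed)
    {
        const unsigned char *p = data;
        unsigned long off = 0, size = 0;
        int bit, shift;

        for (bit = 0x01, shift = 0; bit <= 0x08; bit <<= 1, shift += 8)
            if (cmd & bit) {
                if (p >= top)
                    return -1;      /* truncated delta */
                off |= (unsigned long)*p++ << shift;
            }
        for (bit = 0x10, shift = 0; bit <= 0x40; bit <<= 1, shift += 8)
            if (cmd & bit) {
                if (p >= top)
                    return -1;
                size |= (unsigned long)*p++ << shift;
            }
        if (!size)
            size = 0x10000;
        *cp_off = off;
        *cp_size = size;
        *consumed = p - data;
        return 0;
    }
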
diff --git a/patch-ids.c b/patch-ids.c
index 8f7c25d5db..960ea24054 100644
--- a/patch-ids.c
+++ b/patch-ids.c
@@ -28,14 +28,14 @@ int commit_patch_id(struct commit *commit, struct diff_options *options,
/*
* When we cannot load the full patch-id for both commits for whatever
* reason, the function returns -1 (i.e. return error(...)). Despite
- * the "cmp" in the name of this function, the caller only cares about
+ * the "neq" in the name of this function, the caller only cares about
* the return value being zero (a and b are equivalent) or non-zero (a
* and b are different), and returning non-zero would keep both in the
* result, even if they actually were equivalent, in order to err on
* the side of safety. The actual value being negative does not have
* any significance; only that it is non-zero matters.
*/
-static int patch_id_cmp(const void *cmpfn_data,
+static int patch_id_neq(const void *cmpfn_data,
const void *entry,
const void *entry_or_key,
const void *unused_keydata)
@@ -53,7 +53,7 @@ static int patch_id_cmp(const void *cmpfn_data,
commit_patch_id(b->commit, opt, &b->patch_id, 0))
return error("Could not get patch ID for %s",
oid_to_hex(&b->commit->object.oid));
- return oidcmp(&a->patch_id, &b->patch_id);
+ return !oideq(&a->patch_id, &b->patch_id);
}
int init_patch_ids(struct patch_ids *ids)
@@ -63,7 +63,7 @@ int init_patch_ids(struct patch_ids *ids)
ids->diffopts.detect_rename = 0;
ids->diffopts.flags.recursive = 1;
diff_setup_done(&ids->diffopts);
- hashmap_init(&ids->patches, patch_id_cmp, &ids->diffopts, 256);
+ hashmap_init(&ids->patches, patch_id_neq, &ids->diffopts, 256);
return 0;
}
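
git's hashmap comparison callbacks follow the strcmp() convention of returning 0 for "equal", which is why the function is renamed to patch_id_neq and returns !oideq(). A hypothetical cmpfn for string-keyed entries, shown only to illustrate that convention:

    struct str_entry {
        struct hashmap_entry ent;
        const char *key;
    };

    static int str_entry_neq(const void *cmp_data,
                             const void *entry,
                             const void *entry_or_key,
                             const void *keydata)
    {
        const struct str_entry *a = entry;
        const struct str_entry *b = entry_or_key;
        const char *key = keydata;

        /* zero means "equal", anything else means "different" */
        return strcmp(a->key, key ? key : b->key);
    }
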
diff --git a/preload-index.c b/preload-index.c
index 71cd2437a3..f7365761f4 100644
--- a/preload-index.c
+++ b/preload-index.c
@@ -78,7 +78,6 @@ static void preload_index(struct index_state *index,
{
int threads, i, work, offset;
struct thread_data data[MAX_PARALLEL];
- uint64_t start = getnanotime();
if (!core_preload_index)
return;
@@ -88,6 +87,7 @@ static void preload_index(struct index_state *index,
threads = 2;
if (threads < 2)
return;
+ trace_performance_enter();
if (threads > MAX_PARALLEL)
threads = MAX_PARALLEL;
offset = 0;
@@ -109,7 +109,7 @@ static void preload_index(struct index_state *index,
if (pthread_join(p->pthread, NULL))
die("unable to join threaded lstat");
}
- trace_performance_since(start, "preload index");
+ trace_performance_leave("preload index");
}
#endif
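
trace_performance_enter()/trace_performance_leave() replace the manually carried getnanotime() start value and, unlike trace_performance_since(), the pairs nest, so a region measured inside another region reports its own elapsed time. A minimal sketch of paired, nested use; the function names are illustrative:

    static void inner_step(void)
    {
        trace_performance_enter();
        /* ... the work being measured ... */
        trace_performance_leave("inner step");
    }

    static void outer_step(void)
    {
        trace_performance_enter();
        inner_step();               /* nested region reports separately */
        trace_performance_leave("outer step, including inner");
    }
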
diff --git a/pretty.c b/pretty.c
index 98cf5228f9..8ca29e9281 100644
--- a/pretty.c
+++ b/pretty.c
@@ -1304,6 +1304,9 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
if (skip_prefix(placeholder, "(trailers", &arg)) {
struct process_trailer_options opts = PROCESS_TRAILER_OPTIONS_INIT;
+
+ opts.no_divider = 1;
+
if (*arg == ':') {
arg++;
for (;;) {
diff --git a/range-diff.c b/range-diff.c
index b6b9abac26..60edb2f518 100644
--- a/range-diff.c
+++ b/range-diff.c
@@ -38,6 +38,14 @@ static int read_patches(const char *range, struct string_list *list)
argv_array_pushl(&cp.args, "log", "--no-color", "-p", "--no-merges",
"--reverse", "--date-order", "--decorate=no",
+ /*
+ * Choose indicators that are not used anywhere
+ * else in diffs, but still look reasonable
+ * (e.g. will not be confusing when debugging)
+ */
+ "--output-indicator-new=>",
+ "--output-indicator-old=<",
+ "--output-indicator-context=#",
"--no-abbrev-commit", range,
NULL);
cp.out = -1;
@@ -82,6 +90,7 @@ static int read_patches(const char *range, struct string_list *list)
strbuf_addch(&buf, '\n');
if (!util->diff_offset)
util->diff_offset = buf.len;
+ strbuf_addch(&buf, ' ');
strbuf_addbuf(&buf, &line);
} else if (in_header) {
if (starts_with(line.buf, "Author: ")) {
@@ -108,8 +117,19 @@ static int read_patches(const char *range, struct string_list *list)
* we are not interested.
*/
continue;
- else
+ else if (line.buf[0] == '>') {
+ strbuf_addch(&buf, '+');
+ strbuf_add(&buf, line.buf + 1, line.len - 1);
+ } else if (line.buf[0] == '<') {
+ strbuf_addch(&buf, '-');
+ strbuf_add(&buf, line.buf + 1, line.len - 1);
+ } else if (line.buf[0] == '#') {
+ strbuf_addch(&buf, ' ');
+ strbuf_add(&buf, line.buf + 1, line.len - 1);
+ } else {
+ strbuf_addch(&buf, ' ');
strbuf_addbuf(&buf, &line);
+ }
strbuf_addch(&buf, '\n');
util->diffsize++;
@@ -323,7 +343,7 @@ static void output_pair_header(struct diff_options *diffopt,
}
strbuf_addf(buf, "%s\n", color_reset);
- fwrite(buf->buf, buf->len, 1, stdout);
+ fwrite(buf->buf, buf->len, 1, diffopt->file);
}
static struct userdiff_driver no_func_name = {
@@ -409,8 +429,14 @@ static void output(struct string_list *a, struct string_list *b,
strbuf_release(&dashes);
}
+static struct strbuf *output_prefix_cb(struct diff_options *opt, void *data)
+{
+ return data;
+}
+
int show_range_diff(const char *range1, const char *range2,
- int creation_factor, struct diff_options *diffopt)
+ int creation_factor, int dual_color,
+ struct diff_options *diffopt)
{
int res = 0;
@@ -423,9 +449,23 @@ int show_range_diff(const char *range1, const char *range2,
res = error(_("could not parse log for '%s'"), range2);
if (!res) {
+ struct diff_options opts;
+ struct strbuf indent = STRBUF_INIT;
+
+ memcpy(&opts, diffopt, sizeof(opts));
+ opts.output_format = DIFF_FORMAT_PATCH;
+ opts.flags.suppress_diff_headers = 1;
+ opts.flags.dual_color_diffed_diffs = dual_color;
+ opts.output_prefix = output_prefix_cb;
+ strbuf_addstr(&indent, " ");
+ opts.output_prefix_data = &indent;
+ diff_setup_done(&opts);
+
find_exact_matches(&branch1, &branch2);
get_correspondences(&branch1, &branch2, creation_factor);
- output(&branch1, &branch2, diffopt);
+ output(&branch1, &branch2, &opts);
+
+ strbuf_release(&indent);
}
string_list_clear(&branch1, 1);
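
read_patches() above asks "git log" for indicator characters that cannot appear as ordinary diff markers and then rewrites them into the usual '+', '-' and ' ' prefixes, so the inner markers of the diff-of-diffs stay distinguishable from the outer ones. The mapping, written out as a tiny helper for illustration only:

    static char remap_indicator(char c)
    {
        switch (c) {
        case '>': return '+';   /* --output-indicator-new */
        case '<': return '-';   /* --output-indicator-old */
        case '#': return ' ';   /* --output-indicator-context */
        default:  return 0;     /* not an indicator; caller keeps the line as-is */
        }
    }
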
diff --git a/range-diff.h b/range-diff.h
index 2407d46a30..190593f0c7 100644
--- a/range-diff.h
+++ b/range-diff.h
@@ -3,7 +3,10 @@
#include "diff.h"
+#define RANGE_DIFF_CREATION_FACTOR_DEFAULT 60
+
int show_range_diff(const char *range1, const char *range2,
- int creation_factor, struct diff_options *diffopt);
+ int creation_factor, int dual_color,
+ struct diff_options *diffopt);
#endif
diff --git a/read-cache.c b/read-cache.c
index 7b1354d759..8d04d78a58 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -213,7 +213,7 @@ static int ce_compare_data(const struct cache_entry *ce, struct stat *st)
if (fd >= 0) {
struct object_id oid;
if (!index_fd(&oid, fd, st, OBJ_BLOB, ce->name, 0))
- match = oidcmp(&oid, &ce->oid);
+ match = !oideq(&oid, &ce->oid);
/* index_fd() closed the file descriptor already */
}
return match;
@@ -254,7 +254,7 @@ static int ce_compare_gitlink(const struct cache_entry *ce)
*/
if (resolve_gitlink_ref(ce->name, "HEAD", &oid) < 0)
return 0;
- return oidcmp(&oid, &ce->oid);
+ return !oideq(&oid, &ce->oid);
}
static int ce_modified_check_fs(const struct cache_entry *ce, struct stat *st)
@@ -767,7 +767,7 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st,
/* It was suspected to be racily clean, but it turns out to be Ok */
was_same = (alias &&
!ce_stage(alias) &&
- !oidcmp(&alias->oid, &ce->oid) &&
+ oideq(&alias->oid, &ce->oid) &&
ce->ce_mode == alias->ce_mode);
if (pretend)
@@ -1476,8 +1476,8 @@ int refresh_index(struct index_state *istate, unsigned int flags,
const char *typechange_fmt;
const char *added_fmt;
const char *unmerged_fmt;
- uint64_t start = getnanotime();
+ trace_performance_enter();
modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n");
deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n");
typechange_fmt = (in_porcelain ? "T\t%s\n" : "%s needs update\n");
@@ -1547,7 +1547,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
replace_index_entry(istate, i, new_entry);
}
- trace_performance_since(start, "refresh index");
+ trace_performance_leave("refresh index");
return has_errors;
}
@@ -1668,7 +1668,7 @@ static int verify_hdr(struct cache_header *hdr, unsigned long size)
the_hash_algo->init_fn(&c);
the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
the_hash_algo->final_fn(hash, &c);
- if (hashcmp(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
+ if (!hasheq(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
return error("bad index file sha1 signature");
return 0;
}
@@ -2002,7 +2002,6 @@ static void freshen_shared_index(const char *shared_index, int warn)
int read_index_from(struct index_state *istate, const char *path,
const char *gitdir)
{
- uint64_t start = getnanotime();
struct split_index *split_index;
int ret;
char *base_oid_hex;
@@ -2012,8 +2011,9 @@ int read_index_from(struct index_state *istate, const char *path,
if (istate->initialized)
return istate->cache_nr;
+ trace_performance_enter();
ret = do_read_index(istate, path, 0);
- trace_performance_since(start, "read cache %s", path);
+ trace_performance_leave("read cache %s", path);
split_index = istate->split_index;
if (!split_index || is_null_oid(&split_index->base_oid)) {
@@ -2021,6 +2021,7 @@ int read_index_from(struct index_state *istate, const char *path,
return ret;
}
+ trace_performance_enter();
if (split_index->base)
discard_index(split_index->base);
else
@@ -2029,7 +2030,7 @@ int read_index_from(struct index_state *istate, const char *path,
base_oid_hex = oid_to_hex(&split_index->base_oid);
base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
ret = do_read_index(split_index->base, base_path, 1);
- if (oidcmp(&split_index->base_oid, &split_index->base->oid))
+ if (!oideq(&split_index->base_oid, &split_index->base->oid))
die("broken index, expect %s in %s, got %s",
base_oid_hex, base_path,
oid_to_hex(&split_index->base->oid));
@@ -2037,8 +2038,8 @@ int read_index_from(struct index_state *istate, const char *path,
freshen_shared_index(base_path, 0);
merge_base_index(istate);
post_read_index_from(istate);
- trace_performance_since(start, "read cache %s", base_path);
free(base_path);
+ trace_performance_leave("read cache %s", base_path);
return ret;
}
@@ -2395,7 +2396,7 @@ static int verify_index_from(const struct index_state *istate, const char *path)
if (n != the_hash_algo->rawsz)
goto out;
- if (hashcmp(istate->oid.hash, hash))
+ if (!hasheq(istate->oid.hash, hash))
goto out;
close(fd);
@@ -2743,6 +2744,9 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock,
int new_shared_index, ret;
struct split_index *si = istate->split_index;
+ if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
+ cache_tree_verify(istate);
+
if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) {
if (flags & COMMIT_LOCK)
rollback_lock_file(lock);
@@ -2939,6 +2943,8 @@ void move_index_extensions(struct index_state *dst, struct index_state *src)
{
dst->untracked = src->untracked;
src->untracked = NULL;
+ dst->cache_tree = src->cache_tree;
+ src->cache_tree = NULL;
}
struct cache_entry *dup_cache_entry(const struct cache_entry *ce,
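
The conversions in this file (and in the refs code below) replace "!oidcmp(a, b)" with "oideq(a, b)" and "oidcmp(a, b)" with "!oideq(a, b)". A simplified sketch of what the equality helpers boil down to; the real definitions live in the hash headers and honor the active hash algorithm:

    /* assumes <string.h> and the git hash headers */
    static inline int hasheq_sketch(const unsigned char *a, const unsigned char *b)
    {
        return !memcmp(a, b, the_hash_algo->rawsz);
    }

    static inline int oideq_sketch(const struct object_id *a,
                                   const struct object_id *b)
    {
        return hasheq_sketch(a->hash, b->hash);
    }
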
diff --git a/ref-filter.c b/ref-filter.c
index 0bccfceff2..e1bcb4ca8a 100644
--- a/ref-filter.c
+++ b/ref-filter.c
@@ -19,6 +19,7 @@
#include "wt-status.h"
#include "commit-slab.h"
#include "commit-graph.h"
+#include "commit-reach.h"
static struct ref_msg {
const char *gone;
@@ -263,6 +264,8 @@ static int trailers_atom_parser(const struct ref_format *format, struct used_ato
struct string_list params = STRING_LIST_INIT_DUP;
int i;
+ atom->u.contents.trailer_opts.no_divider = 1;
+
if (arg) {
string_list_split(&params, arg, ',', -1);
for (i = 0; i < params.nr; i++) {
@@ -1674,144 +1677,6 @@ static int get_ref_atom_value(struct ref_array_item *ref, int atom,
}
/*
- * Unknown has to be "0" here, because that's the default value for
- * contains_cache slab entries that have not yet been assigned.
- */
-enum contains_result {
- CONTAINS_UNKNOWN = 0,
- CONTAINS_NO,
- CONTAINS_YES
-};
-
-define_commit_slab(contains_cache, enum contains_result);
-
-struct ref_filter_cbdata {
- struct ref_array *array;
- struct ref_filter *filter;
- struct contains_cache contains_cache;
- struct contains_cache no_contains_cache;
-};
-
-/*
- * Mimicking the real stack, this stack lives on the heap, avoiding stack
- * overflows.
- *
- * At each recursion step, the stack items points to the commits whose
- * ancestors are to be inspected.
- */
-struct contains_stack {
- int nr, alloc;
- struct contains_stack_entry {
- struct commit *commit;
- struct commit_list *parents;
- } *contains_stack;
-};
-
-static int in_commit_list(const struct commit_list *want, struct commit *c)
-{
- for (; want; want = want->next)
- if (!oidcmp(&want->item->object.oid, &c->object.oid))
- return 1;
- return 0;
-}
-
-/*
- * Test whether the candidate is contained in the list.
- * Do not recurse to find out, though, but return -1 if inconclusive.
- */
-static enum contains_result contains_test(struct commit *candidate,
- const struct commit_list *want,
- struct contains_cache *cache,
- uint32_t cutoff)
-{
- enum contains_result *cached = contains_cache_at(cache, candidate);
-
- /* If we already have the answer cached, return that. */
- if (*cached)
- return *cached;
-
- /* or are we it? */
- if (in_commit_list(want, candidate)) {
- *cached = CONTAINS_YES;
- return CONTAINS_YES;
- }
-
- /* Otherwise, we don't know; prepare to recurse */
- parse_commit_or_die(candidate);
-
- if (candidate->generation < cutoff)
- return CONTAINS_NO;
-
- return CONTAINS_UNKNOWN;
-}
-
-static void push_to_contains_stack(struct commit *candidate, struct contains_stack *contains_stack)
-{
- ALLOC_GROW(contains_stack->contains_stack, contains_stack->nr + 1, contains_stack->alloc);
- contains_stack->contains_stack[contains_stack->nr].commit = candidate;
- contains_stack->contains_stack[contains_stack->nr++].parents = candidate->parents;
-}
-
-static enum contains_result contains_tag_algo(struct commit *candidate,
- const struct commit_list *want,
- struct contains_cache *cache)
-{
- struct contains_stack contains_stack = { 0, 0, NULL };
- enum contains_result result;
- uint32_t cutoff = GENERATION_NUMBER_INFINITY;
- const struct commit_list *p;
-
- for (p = want; p; p = p->next) {
- struct commit *c = p->item;
- load_commit_graph_info(the_repository, c);
- if (c->generation < cutoff)
- cutoff = c->generation;
- }
-
- result = contains_test(candidate, want, cache, cutoff);
- if (result != CONTAINS_UNKNOWN)
- return result;
-
- push_to_contains_stack(candidate, &contains_stack);
- while (contains_stack.nr) {
- struct contains_stack_entry *entry = &contains_stack.contains_stack[contains_stack.nr - 1];
- struct commit *commit = entry->commit;
- struct commit_list *parents = entry->parents;
-
- if (!parents) {
- *contains_cache_at(cache, commit) = CONTAINS_NO;
- contains_stack.nr--;
- }
- /*
- * If we just popped the stack, parents->item has been marked,
- * therefore contains_test will return a meaningful yes/no.
- */
- else switch (contains_test(parents->item, want, cache, cutoff)) {
- case CONTAINS_YES:
- *contains_cache_at(cache, commit) = CONTAINS_YES;
- contains_stack.nr--;
- break;
- case CONTAINS_NO:
- entry->parents = parents->next;
- break;
- case CONTAINS_UNKNOWN:
- push_to_contains_stack(parents->item, &contains_stack);
- break;
- }
- }
- free(contains_stack.contains_stack);
- return contains_test(candidate, want, cache, cutoff);
-}
-
-static int commit_contains(struct ref_filter *filter, struct commit *commit,
- struct commit_list *list, struct contains_cache *cache)
-{
- if (filter->with_commit_tag_algo)
- return contains_tag_algo(commit, list, cache) == CONTAINS_YES;
- return is_descendant_of(commit, list);
-}
-
-/*
* Return 1 if the refname matches one of the patterns, otherwise 0.
* A pattern can be a literal prefix (e.g. a refname "refs/heads/master"
* matches a pattern "refs/heads/mas") or a wildcard (e.g. the same ref
@@ -2046,6 +1911,13 @@ static int filter_ref_kind(struct ref_filter *filter, const char *refname)
return ref_kind_from_refname(refname);
}
+struct ref_filter_cbdata {
+ struct ref_array *array;
+ struct ref_filter *filter;
+ struct contains_cache contains_cache;
+ struct contains_cache no_contains_cache;
+};
+
/*
* A call-back given to for_each_ref(). Filter refs and keep them for
* later object processing.
diff --git a/refs.c b/refs.c
index de81c7be7c..a7a75b4cc0 100644
--- a/refs.c
+++ b/refs.c
@@ -702,7 +702,7 @@ static int write_pseudoref(const char *pseudoref, const struct object_id *oid,
pseudoref);
rollback_lock_file(&lock);
goto done;
- } else if (oidcmp(&actual_old_oid, old_oid)) {
+ } else if (!oideq(&actual_old_oid, old_oid)) {
strbuf_addf(err, _("unexpected object ID when writing '%s'"),
pseudoref);
rollback_lock_file(&lock);
@@ -744,7 +744,7 @@ static int delete_pseudoref(const char *pseudoref, const struct object_id *old_o
}
if (read_ref(pseudoref, &actual_old_oid))
die(_("could not read ref '%s'"), pseudoref);
- if (oidcmp(&actual_old_oid, old_oid)) {
+ if (!oideq(&actual_old_oid, old_oid)) {
error(_("unexpected object ID when deleting '%s'"),
pseudoref);
rollback_lock_file(&lock);
@@ -875,13 +875,13 @@ static int read_ref_at_ent(struct object_id *ooid, struct object_id *noid,
*/
if (!is_null_oid(&cb->ooid)) {
oidcpy(cb->oid, noid);
- if (oidcmp(&cb->ooid, noid))
+ if (!oideq(&cb->ooid, noid))
warning(_("log for ref %s has gap after %s"),
cb->refname, show_date(cb->date, cb->tz, DATE_MODE(RFC2822)));
}
else if (cb->date == cb->at_time)
oidcpy(cb->oid, noid);
- else if (oidcmp(noid, cb->oid))
+ else if (!oideq(noid, cb->oid))
warning(_("log for ref %s unexpectedly ended on %s"),
cb->refname, show_date(cb->date, cb->tz,
DATE_MODE(RFC2822)));
diff --git a/refs/files-backend.c b/refs/files-backend.c
index 1f1a98e4cb..16ef9325e0 100644
--- a/refs/files-backend.c
+++ b/refs/files-backend.c
@@ -841,7 +841,7 @@ static int verify_lock(struct ref_store *ref_store, struct ref_lock *lock,
return 0;
}
}
- if (old_oid && oidcmp(&lock->old_oid, old_oid)) {
+ if (old_oid && !oideq(&lock->old_oid, old_oid)) {
strbuf_addf(err, "ref '%s' is at %s but expected %s",
lock->ref_name,
oid_to_hex(&lock->old_oid),
@@ -2307,7 +2307,7 @@ static int check_old_oid(struct ref_update *update, struct object_id *oid,
struct strbuf *err)
{
if (!(update->flags & REF_HAVE_OLD) ||
- !oidcmp(oid, &update->old_oid))
+ oideq(oid, &update->old_oid))
return 0;
if (is_null_oid(&update->old_oid))
@@ -2443,7 +2443,7 @@ static int lock_ref_for_update(struct files_ref_store *refs,
!(update->flags & REF_DELETING) &&
!(update->flags & REF_LOG_ONLY)) {
if (!(update->type & REF_ISSYMREF) &&
- !oidcmp(&lock->old_oid, &update->new_oid)) {
+ oideq(&lock->old_oid, &update->new_oid)) {
/*
* The reference already has the desired
* value, so we don't need to write it.
diff --git a/refs/packed-backend.c b/refs/packed-backend.c
index d447a731da..74e2996e93 100644
--- a/refs/packed-backend.c
+++ b/refs/packed-backend.c
@@ -1160,7 +1160,7 @@ static int write_with_updates(struct packed_ref_store *refs,
"reference already exists",
update->refname);
goto error;
- } else if (oidcmp(&update->old_oid, iter->oid)) {
+ } else if (!oideq(&update->old_oid, iter->oid)) {
strbuf_addf(err, "cannot update ref '%s': "
"is at %s but expected %s",
update->refname,
diff --git a/refs/ref-cache.c b/refs/ref-cache.c
index 9b110c8494..b7052f72e2 100644
--- a/refs/ref-cache.c
+++ b/refs/ref-cache.c
@@ -272,7 +272,7 @@ static int is_dup_ref(const struct ref_entry *ref1, const struct ref_entry *ref2
/* This is impossible by construction */
die("Reference directory conflict: %s", ref1->name);
- if (oidcmp(&ref1->u.value.oid, &ref2->u.value.oid))
+ if (!oideq(&ref1->u.value.oid, &ref2->u.value.oid))
die("Duplicated ref, and SHA1s don't match: %s", ref1->name);
warning("Duplicated ref: %s", ref1->name);
diff --git a/remote-curl.c b/remote-curl.c
index fb28309e85..762a55a75f 100644
--- a/remote-curl.c
+++ b/remote-curl.c
@@ -178,7 +178,7 @@ static int set_option(const char *name, const char *value)
options.no_dependents = 1;
return 0;
} else if (!strcmp(name, "filter")) {
- options.filter = xstrdup(value);;
+ options.filter = xstrdup(value);
return 0;
} else {
return 1 /* unsupported */;
diff --git a/remote.c b/remote.c
index 7f6277a145..682f2a01f9 100644
--- a/remote.c
+++ b/remote.c
@@ -12,6 +12,7 @@
#include "string-list.h"
#include "mergesort.h"
#include "argv-array.h"
+#include "commit-reach.h"
enum map_direction { FROM_SRC, FROM_DST };
@@ -1388,7 +1389,7 @@ void set_ref_status_for_push(struct ref *remote_refs, int send_mirror,
ref->deletion = is_null_oid(&ref->new_oid);
if (!ref->deletion &&
- !oidcmp(&ref->old_oid, &ref->new_oid)) {
+ oideq(&ref->old_oid, &ref->new_oid)) {
ref->status = REF_STATUS_UPTODATE;
continue;
}
@@ -1403,7 +1404,7 @@ void set_ref_status_for_push(struct ref *remote_refs, int send_mirror,
* branch.
*/
if (ref->expect_old_sha1) {
- if (oidcmp(&ref->old_oid, &ref->old_oid_expect))
+ if (!oideq(&ref->old_oid, &ref->old_oid_expect))
reject_reason = REF_STATUS_REJECT_STALE;
else
/* If the ref isn't stale then force the update. */
@@ -1791,55 +1792,6 @@ int resolve_remote_symref(struct ref *ref, struct ref *list)
return 1;
}
-static void unmark_and_free(struct commit_list *list, unsigned int mark)
-{
- while (list) {
- struct commit *commit = pop_commit(&list);
- commit->object.flags &= ~mark;
- }
-}
-
-int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid)
-{
- struct object *o;
- struct commit *old_commit, *new_commit;
- struct commit_list *list, *used;
- int found = 0;
-
- /*
- * Both new_commit and old_commit must be commit-ish and new_commit is descendant of
- * old_commit. Otherwise we require --force.
- */
- o = deref_tag(the_repository, parse_object(the_repository, old_oid),
- NULL, 0);
- if (!o || o->type != OBJ_COMMIT)
- return 0;
- old_commit = (struct commit *) o;
-
- o = deref_tag(the_repository, parse_object(the_repository, new_oid),
- NULL, 0);
- if (!o || o->type != OBJ_COMMIT)
- return 0;
- new_commit = (struct commit *) o;
-
- if (parse_commit(new_commit) < 0)
- return 0;
-
- used = list = NULL;
- commit_list_insert(new_commit, &list);
- while (list) {
- new_commit = pop_most_recent_commit(&list, TMP_MARK);
- commit_list_insert(new_commit, &used);
- if (new_commit == old_commit) {
- found = 1;
- break;
- }
- }
- unmark_and_free(list, TMP_MARK);
- unmark_and_free(used, TMP_MARK);
- return found;
-}
-
/*
* Lookup the upstream branch for the given branch and if present, optionally
* compute the commit ahead/behind values for the pair.
@@ -2049,7 +2001,7 @@ struct ref *guess_remote_head(const struct ref *head,
/* If refs/heads/master could be right, it is. */
if (!all) {
r = find_ref_by_name(refs, "refs/heads/master");
- if (r && !oidcmp(&r->old_oid, &head->old_oid))
+ if (r && oideq(&r->old_oid, &head->old_oid))
return copy_ref(r);
}
@@ -2057,7 +2009,7 @@ struct ref *guess_remote_head(const struct ref *head,
for (r = refs; r; r = r->next) {
if (r != head &&
starts_with(r->name, "refs/heads/") &&
- !oidcmp(&r->old_oid, &head->old_oid)) {
+ oideq(&r->old_oid, &head->old_oid)) {
*tail = copy_ref(r);
tail = &((*tail)->next);
if (!all)
diff --git a/remote.h b/remote.h
index 88f8480c71..da53ad570b 100644
--- a/remote.h
+++ b/remote.h
@@ -151,7 +151,6 @@ extern struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
const struct string_list *server_options);
int resolve_remote_symref(struct ref *ref, struct ref *list);
-int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid);
/*
* Remove and free all but the first of any entries in the input list
diff --git a/rerere.c b/rerere.c
index c7787aa07f..7aa149e849 100644
--- a/rerere.c
+++ b/rerere.c
@@ -213,7 +213,7 @@ static void read_rr(struct string_list *rr)
/* There has to be the hash, tab, path and then NUL */
if (buf.len < 42 || get_sha1_hex(buf.buf, sha1))
- die("corrupt MERGE_RR");
+ die(_("corrupt MERGE_RR"));
if (buf.buf[40] != '.') {
variant = 0;
@@ -222,10 +222,10 @@ static void read_rr(struct string_list *rr)
errno = 0;
variant = strtol(buf.buf + 41, &path, 10);
if (errno)
- die("corrupt MERGE_RR");
+ die(_("corrupt MERGE_RR"));
}
if (*(path++) != '\t')
- die("corrupt MERGE_RR");
+ die(_("corrupt MERGE_RR"));
buf.buf[40] = '\0';
id = new_rerere_id_hex(buf.buf);
id->variant = variant;
@@ -260,12 +260,12 @@ static int write_rr(struct string_list *rr, int out_fd)
rr->items[i].string, 0);
if (write_in_full(out_fd, buf.buf, buf.len) < 0)
- die("unable to write rerere record");
+ die(_("unable to write rerere record"));
strbuf_release(&buf);
}
if (commit_lock_file(&write_lock) != 0)
- die("unable to write rerere record");
+ die(_("unable to write rerere record"));
return 0;
}
@@ -303,38 +303,6 @@ static void rerere_io_putstr(const char *str, struct rerere_io *io)
ferr_puts(str, io->output, &io->wrerror);
}
-/*
- * Write a conflict marker to io->output (if defined).
- */
-static void rerere_io_putconflict(int ch, int size, struct rerere_io *io)
-{
- char buf[64];
-
- while (size) {
- if (size <= sizeof(buf) - 2) {
- memset(buf, ch, size);
- buf[size] = '\n';
- buf[size + 1] = '\0';
- size = 0;
- } else {
- int sz = sizeof(buf) - 1;
-
- /*
- * Make sure we will not write everything out
- * in this round by leaving at least 1 byte
- * for the next round, giving the next round
- * a chance to add the terminating LF. Yuck.
- */
- if (size <= sz)
- sz -= (sz - size) + 1;
- memset(buf, ch, sz);
- buf[sz] = '\0';
- size -= sz;
- }
- rerere_io_putstr(buf, io);
- }
-}
-
static void rerere_io_putmem(const char *mem, size_t sz, struct rerere_io *io)
{
if (io->output)
@@ -385,89 +353,109 @@ static int is_cmarker(char *buf, int marker_char, int marker_size)
return isspace(*buf);
}
-/*
- * Read contents a file with conflicts, normalize the conflicts
- * by (1) discarding the common ancestor version in diff3-style,
- * (2) reordering our side and their side so that whichever sorts
- * alphabetically earlier comes before the other one, while
- * computing the "conflict ID", which is just an SHA-1 hash of
- * one side of the conflict, NUL, the other side of the conflict,
- * and NUL concatenated together.
- *
- * Return the number of conflict hunks found.
- *
- * NEEDSWORK: the logic and theory of operation behind this conflict
- * normalization may deserve to be documented somewhere, perhaps in
- * Documentation/technical/rerere.txt.
- */
-static int handle_path(unsigned char *sha1, struct rerere_io *io, int marker_size)
+static void rerere_strbuf_putconflict(struct strbuf *buf, int ch, size_t size)
+{
+ strbuf_addchars(buf, ch, size);
+ strbuf_addch(buf, '\n');
+}
+
+static int handle_conflict(struct strbuf *out, struct rerere_io *io,
+ int marker_size, git_SHA_CTX *ctx)
{
- git_SHA_CTX ctx;
- int hunk_no = 0;
enum {
- RR_CONTEXT = 0, RR_SIDE_1, RR_SIDE_2, RR_ORIGINAL
- } hunk = RR_CONTEXT;
+ RR_SIDE_1 = 0, RR_SIDE_2, RR_ORIGINAL
+ } hunk = RR_SIDE_1;
struct strbuf one = STRBUF_INIT, two = STRBUF_INIT;
- struct strbuf buf = STRBUF_INIT;
-
- if (sha1)
- git_SHA1_Init(&ctx);
+ struct strbuf buf = STRBUF_INIT, conflict = STRBUF_INIT;
+ int has_conflicts = -1;
while (!io->getline(&buf, io)) {
if (is_cmarker(buf.buf, '<', marker_size)) {
- if (hunk != RR_CONTEXT)
- goto bad;
- hunk = RR_SIDE_1;
+ if (handle_conflict(&conflict, io, marker_size, NULL) < 0)
+ break;
+ if (hunk == RR_SIDE_1)
+ strbuf_addbuf(&one, &conflict);
+ else
+ strbuf_addbuf(&two, &conflict);
+ strbuf_release(&conflict);
} else if (is_cmarker(buf.buf, '|', marker_size)) {
if (hunk != RR_SIDE_1)
- goto bad;
+ break;
hunk = RR_ORIGINAL;
} else if (is_cmarker(buf.buf, '=', marker_size)) {
if (hunk != RR_SIDE_1 && hunk != RR_ORIGINAL)
- goto bad;
+ break;
hunk = RR_SIDE_2;
} else if (is_cmarker(buf.buf, '>', marker_size)) {
if (hunk != RR_SIDE_2)
- goto bad;
+ break;
if (strbuf_cmp(&one, &two) > 0)
strbuf_swap(&one, &two);
- hunk_no++;
- hunk = RR_CONTEXT;
- rerere_io_putconflict('<', marker_size, io);
- rerere_io_putmem(one.buf, one.len, io);
- rerere_io_putconflict('=', marker_size, io);
- rerere_io_putmem(two.buf, two.len, io);
- rerere_io_putconflict('>', marker_size, io);
- if (sha1) {
- git_SHA1_Update(&ctx, one.buf ? one.buf : "",
+ has_conflicts = 1;
+ rerere_strbuf_putconflict(out, '<', marker_size);
+ strbuf_addbuf(out, &one);
+ rerere_strbuf_putconflict(out, '=', marker_size);
+ strbuf_addbuf(out, &two);
+ rerere_strbuf_putconflict(out, '>', marker_size);
+ if (ctx) {
+ git_SHA1_Update(ctx, one.buf ? one.buf : "",
one.len + 1);
- git_SHA1_Update(&ctx, two.buf ? two.buf : "",
+ git_SHA1_Update(ctx, two.buf ? two.buf : "",
two.len + 1);
}
- strbuf_reset(&one);
- strbuf_reset(&two);
+ break;
} else if (hunk == RR_SIDE_1)
strbuf_addbuf(&one, &buf);
else if (hunk == RR_ORIGINAL)
; /* discard */
else if (hunk == RR_SIDE_2)
strbuf_addbuf(&two, &buf);
- else
- rerere_io_putstr(buf.buf, io);
- continue;
- bad:
- hunk = 99; /* force error exit */
- break;
}
strbuf_release(&one);
strbuf_release(&two);
strbuf_release(&buf);
+ return has_conflicts;
+}
+
+/*
+ * Read the contents of a file with conflicts, normalize the conflicts
+ * by (1) discarding the common ancestor version in diff3-style,
+ * (2) reordering our side and their side so that whichever sorts
+ * alphabetically earlier comes before the other one, while
+ * computing the "conflict ID", which is just an SHA-1 hash of
+ * one side of the conflict, NUL, the other side of the conflict,
+ * and NUL concatenated together.
+ *
+ * Return 1 if conflict hunks are found, 0 if there are no conflict
+ * hunks and -1 if an error occurred.
+ */
+static int handle_path(unsigned char *sha1, struct rerere_io *io, int marker_size)
+{
+ git_SHA_CTX ctx;
+ struct strbuf buf = STRBUF_INIT, out = STRBUF_INIT;
+ int has_conflicts = 0;
+ if (sha1)
+ git_SHA1_Init(&ctx);
+
+ while (!io->getline(&buf, io)) {
+ if (is_cmarker(buf.buf, '<', marker_size)) {
+ has_conflicts = handle_conflict(&out, io, marker_size,
+ sha1 ? &ctx : NULL);
+ if (has_conflicts < 0)
+ break;
+ rerere_io_putmem(out.buf, out.len, io);
+ strbuf_reset(&out);
+ } else
+ rerere_io_putstr(buf.buf, io);
+ }
+ strbuf_release(&buf);
+ strbuf_release(&out);
+
if (sha1)
git_SHA1_Final(sha1, &ctx);
- if (hunk != RR_CONTEXT)
- return -1;
- return hunk_no;
+
+ return has_conflicts;
}
/*
@@ -476,7 +464,7 @@ static int handle_path(unsigned char *sha1, struct rerere_io *io, int marker_siz
*/
static int handle_file(const char *path, unsigned char *sha1, const char *output)
{
- int hunk_no = 0;
+ int has_conflicts = 0;
struct rerere_io_file io;
int marker_size = ll_merge_marker_size(path);
@@ -485,34 +473,34 @@ static int handle_file(const char *path, unsigned char *sha1, const char *output
io.input = fopen(path, "r");
io.io.wrerror = 0;
if (!io.input)
- return error_errno("Could not open %s", path);
+ return error_errno(_("could not open '%s'"), path);
if (output) {
io.io.output = fopen(output, "w");
if (!io.io.output) {
- error_errno("Could not write %s", output);
+ error_errno(_("could not write '%s'"), output);
fclose(io.input);
return -1;
}
}
- hunk_no = handle_path(sha1, (struct rerere_io *)&io, marker_size);
+ has_conflicts = handle_path(sha1, (struct rerere_io *)&io, marker_size);
fclose(io.input);
if (io.io.wrerror)
- error("There were errors while writing %s (%s)",
+ error(_("there were errors while writing '%s' (%s)"),
path, strerror(io.io.wrerror));
if (io.io.output && fclose(io.io.output))
- io.io.wrerror = error_errno("Failed to flush %s", path);
+ io.io.wrerror = error_errno(_("failed to flush '%s'"), path);
- if (hunk_no < 0) {
+ if (has_conflicts < 0) {
if (output)
unlink_or_warn(output);
- return error("Could not parse conflict hunks in %s", path);
+ return error(_("could not parse conflict hunks in '%s'"), path);
}
if (io.io.wrerror)
return -1;
- return hunk_no;
+ return has_conflicts;
}
/*
@@ -533,7 +521,7 @@ static int check_one_conflict(int i, int *type)
}
*type = PUNTED;
- while (ce_stage(active_cache[i]) == 1)
+ while (i < active_nr && ce_stage(active_cache[i]) == 1)
i++;
/* Only handle regular files with both stages #2 and #3 */
@@ -569,7 +557,7 @@ static int find_conflict(struct string_list *conflict)
{
int i;
if (read_cache() < 0)
- return error("Could not read index");
+ return error(_("index file corrupt"));
for (i = 0; i < active_nr;) {
int conflict_type;
@@ -602,7 +590,7 @@ int rerere_remaining(struct string_list *merge_rr)
if (setup_rerere(merge_rr, RERERE_READONLY))
return 0;
if (read_cache() < 0)
- return error("Could not read index");
+ return error(_("index file corrupt"));
for (i = 0; i < active_nr;) {
int conflict_type;
@@ -685,17 +673,17 @@ static int merge(const struct rerere_id *id, const char *path)
* Mark that "postimage" was used to help gc.
*/
if (utime(rerere_path(id, "postimage"), NULL) < 0)
- warning_errno("failed utime() on %s",
+ warning_errno(_("failed utime() on '%s'"),
rerere_path(id, "postimage"));
/* Update "path" with the resolution */
f = fopen(path, "w");
if (!f)
- return error_errno("Could not open %s", path);
+ return error_errno(_("could not open '%s'"), path);
if (fwrite(result.ptr, result.size, 1, f) != 1)
- error_errno("Could not write %s", path);
+ error_errno(_("could not write '%s'"), path);
if (fclose(f))
- return error_errno("Writing %s failed", path);
+ return error_errno(_("writing '%s' failed"), path);
out:
free(cur.ptr);
@@ -715,13 +703,13 @@ static void update_paths(struct string_list *update)
struct string_list_item *item = &update->items[i];
if (add_file_to_cache(item->string, 0))
exit(128);
- fprintf(stderr, "Staged '%s' using previous resolution.\n",
+ fprintf_ln(stderr, _("Staged '%s' using previous resolution."),
item->string);
}
if (write_locked_index(&the_index, &index_lock,
COMMIT_LOCK | SKIP_IF_UNCHANGED))
- die("Unable to write new index file");
+ die(_("unable to write new index file"));
}
static void remove_variant(struct rerere_id *id)
@@ -753,7 +741,7 @@ static void do_rerere_one_path(struct string_list_item *rr_item,
if (!handle_file(path, NULL, NULL)) {
copy_file(rerere_path(id, "postimage"), path, 0666);
id->collection->status[variant] |= RR_HAS_POSTIMAGE;
- fprintf(stderr, "Recorded resolution for '%s'.\n", path);
+ fprintf_ln(stderr, _("Recorded resolution for '%s'."), path);
free_rerere_id(rr_item);
rr_item->util = NULL;
return;
@@ -787,9 +775,9 @@ static void do_rerere_one_path(struct string_list_item *rr_item,
if (rerere_autoupdate)
string_list_insert(update, path);
else
- fprintf(stderr,
- "Resolved '%s' using previous resolution.\n",
- path);
+ fprintf_ln(stderr,
+ _("Resolved '%s' using previous resolution."),
+ path);
free_rerere_id(rr_item);
rr_item->util = NULL;
return;
@@ -803,11 +791,11 @@ static void do_rerere_one_path(struct string_list_item *rr_item,
if (id->collection->status[variant] & RR_HAS_POSTIMAGE) {
const char *path = rerere_path(id, "postimage");
if (unlink(path))
- die_errno("cannot unlink stray '%s'", path);
+ die_errno(_("cannot unlink stray '%s'"), path);
id->collection->status[variant] &= ~RR_HAS_POSTIMAGE;
}
id->collection->status[variant] |= RR_HAS_PREIMAGE;
- fprintf(stderr, "Recorded preimage for '%s'\n", path);
+ fprintf_ln(stderr, _("Recorded preimage for '%s'"), path);
}
static int do_plain_rerere(struct string_list *rr, int fd)
@@ -830,15 +818,16 @@ static int do_plain_rerere(struct string_list *rr, int fd)
const char *path = conflict.items[i].string;
int ret;
- if (string_list_has_string(rr, path))
- continue;
-
/*
* Ask handle_file() to scan and assign a
* conflict ID. No need to write anything out
* yet.
*/
ret = handle_file(path, sha1, NULL);
+ if (ret != 0 && string_list_has_string(rr, path)) {
+ remove_variant(string_list_lookup(rr, path)->util);
+ string_list_remove(rr, path, 1);
+ }
if (ret < 1)
continue;
@@ -879,7 +868,7 @@ static int is_rerere_enabled(void)
return rr_cache_exists;
if (!rr_cache_exists && mkdir_in_gitdir(git_path_rr_cache()))
- die("Could not create directory %s", git_path_rr_cache());
+ die(_("could not create directory '%s'"), git_path_rr_cache());
return 1;
}
@@ -958,7 +947,7 @@ static int handle_cache(const char *path, unsigned char *sha1, const char *outpu
mmfile_t mmfile[3] = {{NULL}};
mmbuffer_t result = {NULL, 0};
const struct cache_entry *ce;
- int pos, len, i, hunk_no;
+ int pos, len, i, has_conflicts;
struct rerere_io_mem io;
int marker_size = ll_merge_marker_size(path);
@@ -1012,11 +1001,11 @@ static int handle_cache(const char *path, unsigned char *sha1, const char *outpu
* Grab the conflict ID and optionally write the original
* contents with conflict markers out.
*/
- hunk_no = handle_path(sha1, (struct rerere_io *)&io, marker_size);
+ has_conflicts = handle_path(sha1, (struct rerere_io *)&io, marker_size);
strbuf_release(&io.input);
if (io.io.output)
fclose(io.io.output);
- return hunk_no;
+ return has_conflicts;
}
static int rerere_forget_one_path(const char *path, struct string_list *rr)
@@ -1033,7 +1022,7 @@ static int rerere_forget_one_path(const char *path, struct string_list *rr)
*/
ret = handle_cache(path, sha1, NULL);
if (ret < 1)
- return error("Could not parse conflict hunks in '%s'", path);
+ return error(_("could not parse conflict hunks in '%s'"), path);
/* Nuke the recorded resolution for the conflict */
id = new_rerere_id(sha1);
@@ -1051,7 +1040,7 @@ static int rerere_forget_one_path(const char *path, struct string_list *rr)
handle_cache(path, sha1, rerere_path(id, "thisimage"));
if (read_mmfile(&cur, rerere_path(id, "thisimage"))) {
free(cur.ptr);
- error("Failed to update conflicted state in '%s'", path);
+ error(_("failed to update conflicted state in '%s'"), path);
goto fail_exit;
}
cleanly_resolved = !try_merge(id, path, &cur, &result);
@@ -1062,16 +1051,16 @@ static int rerere_forget_one_path(const char *path, struct string_list *rr)
}
if (id->collection->status_nr <= id->variant) {
- error("no remembered resolution for '%s'", path);
+ error(_("no remembered resolution for '%s'"), path);
goto fail_exit;
}
filename = rerere_path(id, "postimage");
if (unlink(filename)) {
if (errno == ENOENT)
- error("no remembered resolution for %s", path);
+ error(_("no remembered resolution for '%s'"), path);
else
- error_errno("cannot unlink %s", filename);
+ error_errno(_("cannot unlink '%s'"), filename);
goto fail_exit;
}
@@ -1081,7 +1070,7 @@ static int rerere_forget_one_path(const char *path, struct string_list *rr)
* the postimage.
*/
handle_cache(path, sha1, rerere_path(id, "preimage"));
- fprintf(stderr, "Updated preimage for '%s'\n", path);
+ fprintf_ln(stderr, _("Updated preimage for '%s'"), path);
/*
* And remember that we can record resolution for this
@@ -1090,7 +1079,7 @@ static int rerere_forget_one_path(const char *path, struct string_list *rr)
item = string_list_insert(rr, path);
free_rerere_id(item);
item->util = id;
- fprintf(stderr, "Forgot resolution for %s\n", path);
+ fprintf(stderr, _("Forgot resolution for '%s'\n"), path);
return 0;
fail_exit:
@@ -1105,7 +1094,7 @@ int rerere_forget(struct pathspec *pathspec)
struct string_list merge_rr = STRING_LIST_INIT_DUP;
if (read_cache() < 0)
- return error("Could not read index");
+ return error(_("index file corrupt"));
fd = setup_rerere(&merge_rr, RERERE_NOAUTOUPDATE);
if (fd < 0)
@@ -1193,7 +1182,7 @@ void rerere_gc(struct string_list *rr)
git_config(git_default_config, NULL);
dir = opendir(git_path("rr-cache"));
if (!dir)
- die_errno("unable to open rr-cache directory");
+ die_errno(_("unable to open rr-cache directory"));
/* Collect stale conflict IDs ... */
while ((e = readdir(dir))) {
struct rerere_dir *rr_dir;
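
The rewritten handle_conflict() above collects the two sides of a conflict, orders them so the lexicographically smaller side comes first, and hashes both sides (each including its trailing NUL) into the conflict ID, which makes the ID independent of which branch happened to be "ours". A condensed sketch of just that ID computation; the function name is illustrative:

    static void conflict_id_sketch(unsigned char *sha1,
                                   struct strbuf *one, struct strbuf *two)
    {
        git_SHA_CTX ctx;

        if (strbuf_cmp(one, two) > 0)
            strbuf_swap(one, two);      /* canonical order of the two sides */

        git_SHA1_Init(&ctx);
        git_SHA1_Update(&ctx, one->buf ? one->buf : "", one->len + 1);
        git_SHA1_Update(&ctx, two->buf ? two->buf : "", two->len + 1);
        git_SHA1_Final(sha1, &ctx);
    }
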
diff --git a/revision.c b/revision.c
index de4dce600d..e18bd530e4 100644
--- a/revision.c
+++ b/revision.c
@@ -24,6 +24,7 @@
#include "packfile.h"
#include "worktree.h"
#include "argv-array.h"
+#include "commit-reach.h"
volatile show_early_output_fn_t show_early_output;
@@ -2318,7 +2319,7 @@ static void NORETURN diagnose_missing_default(const char *def)
*/
int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct setup_revision_opt *opt)
{
- int i, flags, left, seen_dashdash, read_from_stdin, got_rev_arg = 0, revarg_opt;
+ int i, flags, left, seen_dashdash, got_rev_arg = 0, revarg_opt;
struct argv_array prune_data = ARGV_ARRAY_INIT;
const char *submodule = NULL;
@@ -2348,7 +2349,6 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s
revarg_opt = opt ? opt->revarg_opt : 0;
if (seen_dashdash)
revarg_opt |= REVARG_CANNOT_BE_FILENAME;
- read_from_stdin = 0;
for (left = i = 1; i < argc; i++) {
const char *arg = argv[i];
if (*arg == '-') {
@@ -2367,7 +2367,7 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s
argv[left++] = arg;
continue;
}
- if (read_from_stdin++)
+ if (revs->read_from_stdin++)
die("--stdin given twice?");
read_revisions_from_stdin(revs, &prune_data);
continue;
@@ -3238,7 +3238,7 @@ static void track_linear(struct rev_info *revs, struct commit *commit)
struct commit_list *p;
for (p = revs->previous_parents; p; p = p->next)
if (p->item == NULL || /* first commit */
- !oidcmp(&p->item->object.oid, &commit->object.oid))
+ oideq(&p->item->object.oid, &commit->object.oid))
break;
revs->linear = p != NULL;
}
diff --git a/revision.h b/revision.h
index 007278cc11..2b30ac270d 100644
--- a/revision.h
+++ b/revision.h
@@ -82,6 +82,11 @@ struct rev_info {
*/
int rev_input_given;
+ /*
+ * Whether we read from stdin due to the --stdin option.
+ */
+ int read_from_stdin;
+
/* topo-sort */
enum rev_sort_order sort_order;
@@ -214,6 +219,17 @@ struct rev_info {
/* notes-specific options: which refs to show */
struct display_notes_opt notes_opt;
+ /* interdiff */
+ const struct object_id *idiff_oid1;
+ const struct object_id *idiff_oid2;
+ const char *idiff_title;
+
+ /* range-diff */
+ const char *rdiff1;
+ const char *rdiff2;
+ int creation_factor;
+ const char *rdiff_title;
+
/* commit counts */
int count_left;
int count_right;
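
The new rev_info fields above carry the --interdiff and --range-diff parameters for a cover letter. A hypothetical caller filling in the range-diff fields might look like the sketch below; the helper name and the title string are illustrative only:

    static void request_range_diff(struct rev_info *revs,
                                   const char *prev_range, const char *cur_range)
    {
        revs->rdiff1 = prev_range;
        revs->rdiff2 = cur_range;
        revs->creation_factor = RANGE_DIFF_CREATION_FACTOR_DEFAULT;
        revs->rdiff_title = "Range-diff against v1";
    }
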
diff --git a/sequencer.c b/sequencer.c
index dc2c58d464..ddb41a62d9 100644
--- a/sequencer.c
+++ b/sequencer.c
@@ -30,6 +30,7 @@
#include "oidset.h"
#include "commit-slab.h"
#include "alias.h"
+#include "commit-reach.h"
#define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
@@ -225,13 +226,16 @@ static const char *get_todo_path(const struct replay_opts *opts)
* Returns 3 when sob exists within conforming footer as last entry
*/
static int has_conforming_footer(struct strbuf *sb, struct strbuf *sob,
- int ignore_footer)
+ size_t ignore_footer)
{
+ struct process_trailer_options opts = PROCESS_TRAILER_OPTIONS_INIT;
struct trailer_info info;
- int i;
+ size_t i;
int found_sob = 0, found_sob_last = 0;
- trailer_info_get(&info, sb->buf);
+ opts.no_divider = 1;
+
+ trailer_info_get(&info, sb->buf, &opts);
if (info.trailer_start == info.trailer_end)
return 0;
@@ -610,7 +614,7 @@ static int is_index_unchanged(void)
if (!(cache_tree_oid = get_cache_tree_oid()))
return -1;
- return !oidcmp(cache_tree_oid, get_commit_tree_oid(head_commit));
+ return oideq(cache_tree_oid, get_commit_tree_oid(head_commit));
}
static int write_author_script(const char *message)
@@ -899,7 +903,7 @@ static int run_git_commit(const char *defmsg, struct replay_opts *opts,
if ((flags & ALLOW_EMPTY))
argv_array_push(&cmd.args, "--allow-empty");
- if (opts->allow_empty_message)
+ if (!(flags & EDIT_MSG))
argv_array_push(&cmd.args, "--allow-empty-message");
if (cmd.err == -1) {
@@ -1217,7 +1221,7 @@ static int parse_head(struct commit **head)
current_head = lookup_commit_reference(the_repository, &oid);
if (!current_head)
return error(_("could not parse HEAD"));
- if (oidcmp(&oid, &current_head->object.oid)) {
+ if (!oideq(&oid, &current_head->object.oid)) {
warning(_("HEAD %s is not a commit!"),
oid_to_hex(&oid));
}
@@ -1287,9 +1291,9 @@ static int try_to_commit(struct strbuf *msg, const char *author,
goto out;
}
- if (!(flags & ALLOW_EMPTY) && !oidcmp(current_head ?
- get_commit_tree_oid(current_head) :
- the_hash_algo->empty_tree, &tree)) {
+ if (!(flags & ALLOW_EMPTY) && oideq(current_head ?
+ get_commit_tree_oid(current_head) :
+ the_hash_algo->empty_tree, &tree)) {
res = 1; /* run 'git commit' to display error message */
goto out;
}
@@ -1313,7 +1317,7 @@ static int try_to_commit(struct strbuf *msg, const char *author,
if (cleanup != COMMIT_MSG_CLEANUP_NONE)
strbuf_stripspace(msg, cleanup == COMMIT_MSG_CLEANUP_ALL);
- if (!opts->allow_empty_message && message_is_empty(msg, cleanup)) {
+ if ((flags & EDIT_MSG) && message_is_empty(msg, cleanup)) {
res = 1; /* run 'git commit' to display error message */
goto out;
}
@@ -1394,7 +1398,7 @@ static int is_original_commit_empty(struct commit *commit)
ptree_oid = the_hash_algo->empty_tree; /* commit is root */
}
- return !oidcmp(ptree_oid, get_commit_tree_oid(commit));
+ return oideq(ptree_oid, get_commit_tree_oid(commit));
}
/*
@@ -1674,7 +1678,7 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
unborn = get_oid("HEAD", &head);
/* Do we want to generate a root commit? */
if (is_pick_or_similar(command) && opts->have_squash_onto &&
- !oidcmp(&head, &opts->squash_onto)) {
+ oideq(&head, &opts->squash_onto)) {
if (is_fixup(command))
return error(_("cannot fixup root commit"));
flags |= CREATE_ROOT_COMMIT;
@@ -1717,7 +1721,7 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
oid_to_hex(&commit->object.oid));
if (opts->allow_ff && !is_fixup(command) &&
- ((parent && !oidcmp(&parent->object.oid, &head)) ||
+ ((parent && oideq(&parent->object.oid, &head)) ||
(!parent && unborn))) {
if (is_rebase_i(opts))
write_author_script(msg.message);
@@ -2422,7 +2426,7 @@ static int rollback_is_safe(void)
if (get_oid("HEAD", &actual_head))
oidclr(&actual_head);
- return !oidcmp(&actual_head, &expected_head);
+ return oideq(&actual_head, &expected_head);
}
static int reset_for_rollback(const struct object_id *oid)
@@ -2983,7 +2987,7 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len,
}
if (opts->have_squash_onto &&
- !oidcmp(&head_commit->object.oid, &opts->squash_onto)) {
+ oideq(&head_commit->object.oid, &opts->squash_onto)) {
/*
* When the user tells us to "merge" something into a
* "[new root]", let's simply fast-forward to the merge head.
@@ -3052,8 +3056,8 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len,
* commit, we cannot fast-forward.
*/
can_fast_forward = opts->allow_ff && commit && commit->parents &&
- !oidcmp(&commit->parents->item->object.oid,
- &head_commit->object.oid);
+ oideq(&commit->parents->item->object.oid,
+ &head_commit->object.oid);
/*
* If any merge head is different from the original one, we cannot
@@ -3063,7 +3067,7 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len,
struct commit_list *p = commit->parents->next;
for (j = to_merge; j && p; j = j->next, p = p->next)
- if (oidcmp(&j->item->object.oid,
+ if (!oideq(&j->item->object.oid,
&p->item->object.oid)) {
can_fast_forward = 0;
break;
@@ -3131,8 +3135,8 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len,
write_message("no-ff", 5, git_path_merge_mode(the_repository), 0);
bases = get_merge_bases(head_commit, merge_commit);
- if (bases && !oidcmp(&merge_commit->object.oid,
- &bases->item->object.oid)) {
+ if (bases && oideq(&merge_commit->object.oid,
+ &bases->item->object.oid)) {
ret = 0;
/* skip merging an ancestor of HEAD */
goto leave_merge;
@@ -3378,9 +3382,9 @@ static int pick_commits(struct todo_list *todo_list, struct replay_opts *opts)
*/
if (item->command == TODO_REWORD &&
!get_oid("HEAD", &oid) &&
- (!oidcmp(&item->commit->object.oid, &oid) ||
+ (oideq(&item->commit->object.oid, &oid) ||
(opts->have_squash_onto &&
- !oidcmp(&opts->squash_onto, &oid))))
+ oideq(&opts->squash_onto, &oid))))
to_amend = 1;
return res | error_with_patch(item->commit,
@@ -3595,7 +3599,7 @@ static int commit_staged_changes(struct replay_opts *opts,
if (get_oid_hex(rev.buf, &to_amend))
return error(_("invalid contents: '%s'"),
rebase_path_amend());
- if (!is_clean && oidcmp(&head, &to_amend))
+ if (!is_clean && !oideq(&head, &to_amend))
return error(_("\nYou have uncommitted changes in your "
"working tree. Please, commit them\n"
"first and then run 'git rebase "
@@ -3607,9 +3611,20 @@ static int commit_staged_changes(struct replay_opts *opts,
* the commit message and if there was a squash, let the user
* edit it.
*/
- if (is_clean && !oidcmp(&head, &to_amend) &&
- opts->current_fixup_count > 0 &&
- file_exists(rebase_path_stopped_sha())) {
+ if (!is_clean || !opts->current_fixup_count)
+ ; /* this is not the final fixup */
+ else if (!oideq(&head, &to_amend) ||
+ !file_exists(rebase_path_stopped_sha())) {
+ /* was a final fixup or squash done manually? */
+ if (!is_fixup(peek_command(todo_list, 0))) {
+ unlink(rebase_path_fixup_msg());
+ unlink(rebase_path_squash_msg());
+ unlink(rebase_path_current_fixups());
+ strbuf_reset(&opts->current_fixups);
+ opts->current_fixup_count = 0;
+ }
+ } else {
+ /* we are in a fixup/squash chain */
const char *p = opts->current_fixups.buf;
int len = opts->current_fixups.len;
@@ -3828,7 +3843,7 @@ int sequencer_pick_revisions(struct replay_opts *opts)
return res;
}
-void append_signoff(struct strbuf *msgbuf, int ignore_footer, unsigned flag)
+void append_signoff(struct strbuf *msgbuf, size_t ignore_footer, unsigned flag)
{
unsigned no_dup_sob = flag & APPEND_SIGNOFF_DEDUP;
struct strbuf sob = STRBUF_INIT;
@@ -4574,7 +4589,7 @@ int skip_unnecessary_picks(void)
if (item->commit->parents->next)
break; /* merge commit */
parent_oid = &item->commit->parents->item->object.oid;
- if (hashcmp(parent_oid->hash, oid->hash))
+ if (!oideq(parent_oid, oid))
break;
oid = &item->commit->object.oid;
}
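
The bulk of the sequencer.c changes swap negated oidcmp()/hashcmp() calls for the equality helpers oideq()/hasheq(), so the intent ("are these the same object?") is visible at the call site. A minimal standalone sketch of that wrapper pattern, with invented names rather than git's actual cache.h definitions:

    #include <stdio.h>
    #include <string.h>

    #define RAWSZ 20    /* SHA-1 hash width, assumed for this sketch */

    struct oid { unsigned char hash[RAWSZ]; };

    /* ordering comparison, memcmp-style: negative, zero, or positive */
    static int oid_cmp(const struct oid *a, const struct oid *b)
    {
        return memcmp(a->hash, b->hash, RAWSZ);
    }

    /* equality helper: reads better than !oid_cmp(a, b) at call sites */
    static int oid_eq(const struct oid *a, const struct oid *b)
    {
        return !oid_cmp(a, b);
    }

    int main(void)
    {
        struct oid a = { { 0 } }, b = { { 0 } };

        b.hash[0] = 1;
        printf("a == a: %d\n", oid_eq(&a, &a));    /* prints 1 */
        printf("a == b: %d\n", oid_eq(&a, &b));    /* prints 0 */
        return 0;
    }

The conversions above are behavior-preserving: !oidcmp(x, y) and oideq(x, y) answer the same question, but the latter cannot be misread as an ordering check.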
diff --git a/sequencer.h b/sequencer.h
index c751c9d6e4..c986bc8251 100644
--- a/sequencer.h
+++ b/sequencer.h
@@ -90,7 +90,14 @@ int rearrange_squash(void);
extern const char sign_off_header[];
-void append_signoff(struct strbuf *msgbuf, int ignore_footer, unsigned flag);
+/*
+ * Append a signoff to the commit message in "msgbuf". The ignore_footer
+ * parameter specifies the number of bytes at the end of msgbuf that should
+ * not be considered at all. I.e., they are not checked for existing trailers,
+ * and the new signoff will be spliced into the buffer before those bytes.
+ */
+void append_signoff(struct strbuf *msgbuf, size_t ignore_footer, unsigned flag);
+
void append_conflicts_hint(struct strbuf *msgbuf);
int message_is_empty(const struct strbuf *sb,
enum commit_msg_cleanup_mode cleanup_mode);
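
To make the new ignore_footer comment concrete: the last ignore_footer bytes of the buffer are left untouched and the signoff lands immediately before them. A self-contained illustration using only the standard library (splice_signoff and the sample message are invented for this sketch; this is not the strbuf-based implementation):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Insert "sob" into "msg" just before the final "ignore_footer" bytes,
     * mirroring the documented contract: the ignored tail is neither
     * scanned for trailers nor pushed in front of the new signoff.
     */
    static char *splice_signoff(const char *msg, size_t ignore_footer,
                                const char *sob)
    {
        size_t len = strlen(msg);
        size_t keep, soblen = strlen(sob);
        char *out;

        if (ignore_footer > len)
            return NULL;
        keep = len - ignore_footer;    /* bytes before the ignored tail */
        out = malloc(len + soblen + 1);
        if (!out)
            return NULL;
        memcpy(out, msg, keep);
        memcpy(out + keep, sob, soblen);
        memcpy(out + keep + soblen, msg + keep, ignore_footer);
        out[len + soblen] = '\0';
        return out;
    }

    int main(void)
    {
        const char *tail = "-- \npatch bytes\n";
        const char *msg = "subject\n\nbody\n-- \npatch bytes\n";
        char *out = splice_signoff(msg, strlen(tail),
                                   "Signed-off-by: A U Thor <author@example.com>\n");

        if (out) {
            fputs(out, stdout);
            free(out);
        }
        return 0;
    }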
diff --git a/server-info.c b/server-info.c
index 41050c2449..e2b2d6a27a 100644
--- a/server-info.c
+++ b/server-info.c
@@ -199,7 +199,7 @@ static void init_pack_info(const char *infofile, int force)
objdir = get_object_directory();
objdirlen = strlen(objdir);
- for (p = get_packed_git(the_repository); p; p = p->next) {
+ for (p = get_all_packs(the_repository); p; p = p->next) {
/* we ignore things on alternate path since they are
* not available to the pullers in general.
*/
@@ -209,7 +209,7 @@ static void init_pack_info(const char *infofile, int force)
}
num_pack = i;
info = xcalloc(num_pack, sizeof(struct pack_info *));
- for (i = 0, p = get_packed_git(the_repository); p; p = p->next) {
+ for (i = 0, p = get_all_packs(the_repository); p; p = p->next) {
if (!p->pack_local)
continue;
info[i] = xcalloc(1, sizeof(struct pack_info));
diff --git a/sha1-array.c b/sha1-array.c
index 265941fbf4..b94e0ec0f5 100644
--- a/sha1-array.c
+++ b/sha1-array.c
@@ -69,7 +69,7 @@ int oid_array_for_each_unique(struct oid_array *array,
for (i = 0; i < array->nr; i++) {
int ret;
- if (i > 0 && !oidcmp(array->oid + i, array->oid + i - 1))
+ if (i > 0 && oideq(array->oid + i, array->oid + i - 1))
continue;
ret = fn(array->oid + i, data);
if (ret)
diff --git a/sha1-file.c b/sha1-file.c
index 2edf4564f6..a4367b8f04 100644
--- a/sha1-file.c
+++ b/sha1-file.c
@@ -149,10 +149,10 @@ static struct cached_object *find_cached_object(const struct object_id *oid)
struct cached_object *co = cached_objects;
for (i = 0; i < cached_object_nr; i++, co++) {
- if (!oidcmp(&co->oid, oid))
+ if (oideq(&co->oid, oid))
return co;
}
- if (!oidcmp(oid, the_hash_algo->empty_tree))
+ if (oideq(oid, the_hash_algo->empty_tree))
return &empty_tree;
return NULL;
}
@@ -825,7 +825,7 @@ int check_object_signature(const struct object_id *oid, void *map,
if (map) {
hash_object_file(map, size, type, &real_oid);
- return oidcmp(oid, &real_oid) ? -1 : 0;
+ return !oideq(oid, &real_oid) ? -1 : 0;
}
st = open_istream(oid, &obj_type, &size, NULL);
@@ -852,7 +852,7 @@ int check_object_signature(const struct object_id *oid, void *map,
}
the_hash_algo->final_fn(real_oid.hash, &c);
close_istream(st);
- return oidcmp(oid, &real_oid) ? -1 : 0;
+ return !oideq(oid, &real_oid) ? -1 : 0;
}
int git_open_cloexec(const char *name, int flags)
@@ -1671,7 +1671,7 @@ static int write_loose_object(const struct object_id *oid, char *hdr,
die(_("deflateEnd on object %s failed (%d)"), oid_to_hex(oid),
ret);
the_hash_algo->final_fn(parano_oid.hash, &c);
- if (oidcmp(oid, &parano_oid) != 0)
+ if (!oideq(oid, &parano_oid))
die(_("confused by unstable object source data for %s"),
oid_to_hex(oid));
@@ -2213,7 +2213,7 @@ static int check_stream_sha1(git_zstream *stream,
}
the_hash_algo->final_fn(real_sha1, &c);
- if (hashcmp(expected_sha1, real_sha1)) {
+ if (!hasheq(expected_sha1, real_sha1)) {
error(_("sha1 mismatch for %s (expected %s)"), path,
sha1_to_hex(expected_sha1));
return -1;
diff --git a/sha1-name.c b/sha1-name.c
index c9cc1318b7..faa60f69e3 100644
--- a/sha1-name.c
+++ b/sha1-name.c
@@ -12,6 +12,8 @@
#include "packfile.h"
#include "object-store.h"
#include "repository.h"
+#include "midx.h"
+#include "commit-reach.h"
static int get_oid_oneline(const char *, struct object_id *, struct commit_list *);
@@ -44,7 +46,7 @@ static void update_candidates(struct disambiguate_state *ds, const struct object
oidcpy(&ds->candidate, current);
ds->candidate_exists = 1;
return;
- } else if (!oidcmp(&ds->candidate, current)) {
+ } else if (oideq(&ds->candidate, current)) {
/* the same as what we already have seen */
return;
}
@@ -149,6 +151,32 @@ static int match_sha(unsigned len, const unsigned char *a, const unsigned char *
return 1;
}
+static void unique_in_midx(struct multi_pack_index *m,
+ struct disambiguate_state *ds)
+{
+ uint32_t num, i, first = 0;
+ const struct object_id *current = NULL;
+ num = m->num_objects;
+
+ if (!num)
+ return;
+
+ bsearch_midx(&ds->bin_pfx, m, &first);
+
+ /*
+ * At this point, "first" is the location of the lowest object
+ * with an object name that could match "bin_pfx". See if we have
+ * 0, 1 or more objects that actually match(es).
+ */
+ for (i = first; i < num && !ds->ambiguous; i++) {
+ struct object_id oid;
+ current = nth_midxed_object_oid(&oid, m, i);
+ if (!match_sha(ds->len, ds->bin_pfx.hash, current->hash))
+ break;
+ update_candidates(ds, current);
+ }
+}
+
static void unique_in_pack(struct packed_git *p,
struct disambiguate_state *ds)
{
@@ -177,8 +205,12 @@ static void unique_in_pack(struct packed_git *p,
static void find_short_packed_object(struct disambiguate_state *ds)
{
+ struct multi_pack_index *m;
struct packed_git *p;
+ for (m = get_multi_pack_index(the_repository); m && !ds->ambiguous;
+ m = m->next)
+ unique_in_midx(m, ds);
for (p = get_packed_git(the_repository); p && !ds->ambiguous;
p = p->next)
unique_in_pack(p, ds);
@@ -529,6 +561,42 @@ static int extend_abbrev_len(const struct object_id *oid, void *cb_data)
return 0;
}
+static void find_abbrev_len_for_midx(struct multi_pack_index *m,
+ struct min_abbrev_data *mad)
+{
+ int match = 0;
+ uint32_t num, first = 0;
+ struct object_id oid;
+ const struct object_id *mad_oid;
+
+ if (!m->num_objects)
+ return;
+
+ num = m->num_objects;
+ mad_oid = mad->oid;
+ match = bsearch_midx(mad_oid, m, &first);
+
+ /*
+ * first is now the position in the packfile where we would insert
+ * mad->hash if it does not exist (or the position of mad->hash if
+ * it does exist). Hence, we consider a maximum of two objects
+ * nearby for the abbreviation length.
+ */
+ mad->init_len = 0;
+ if (!match) {
+ if (nth_midxed_object_oid(&oid, m, first))
+ extend_abbrev_len(&oid, mad);
+ } else if (first < num - 1) {
+ if (nth_midxed_object_oid(&oid, m, first + 1))
+ extend_abbrev_len(&oid, mad);
+ }
+ if (first > 0) {
+ if (nth_midxed_object_oid(&oid, m, first - 1))
+ extend_abbrev_len(&oid, mad);
+ }
+ mad->init_len = mad->cur_len;
+}
+
static void find_abbrev_len_for_pack(struct packed_git *p,
struct min_abbrev_data *mad)
{
@@ -567,8 +635,11 @@ static void find_abbrev_len_for_pack(struct packed_git *p,
static void find_abbrev_len_packed(struct min_abbrev_data *mad)
{
+ struct multi_pack_index *m;
struct packed_git *p;
+ for (m = get_multi_pack_index(the_repository); m; m = m->next)
+ find_abbrev_len_for_midx(m, mad);
for (p = get_packed_git(the_repository); p; p = p->next)
find_abbrev_len_for_pack(p, mad);
}
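
Both midx helpers rely on bsearch_midx() reporting the lowest position whose object ID is not smaller than the queried prefix; unique_in_midx() then scans forward while entries still match, and find_abbrev_len_for_midx() only inspects the immediate neighbours. A standalone sketch of that lower-bound-and-scan pattern over a sorted string table (the data and names are invented; this is not the midx lookup code itself):

    #include <stdio.h>
    #include <string.h>

    /* sorted "object names" standing in for the midx OID lookup table */
    static const char *oids[] = {
        "1234a0", "1234b1", "1234b2", "9999ff",
    };
    static const int nr = sizeof(oids) / sizeof(*oids);

    /* lowest index whose entry is >= prefix (a lower bound, like bsearch_midx) */
    static int lower_bound(const char *prefix)
    {
        int lo = 0, hi = nr;

        while (lo < hi) {
            int mid = lo + (hi - lo) / 2;
            if (strcmp(oids[mid], prefix) < 0)
                lo = mid + 1;
            else
                hi = mid;
        }
        return lo;
    }

    int main(void)
    {
        const char *prefix = "1234b";
        int first = lower_bound(prefix);
        int i, matches = 0;

        /* walk forward while entries still share the prefix */
        for (i = first; i < nr && !strncmp(oids[i], prefix, strlen(prefix)); i++)
            matches++;

        printf("first=%d matches=%d\n", first, matches);    /* first=1 matches=2 */
        return 0;
    }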
diff --git a/shallow.c b/shallow.c
index dbe8a2a290..99fd2d1ba0 100644
--- a/shallow.c
+++ b/shallow.c
@@ -16,6 +16,7 @@
#include "list-objects.h"
#include "commit-slab.h"
#include "repository.h"
+#include "commit-reach.h"
void set_alternate_shallow_file(struct repository *r, const char *path, int override)
{
diff --git a/string-list.c b/string-list.c
index 771c455098..1f6063f2a2 100644
--- a/string-list.c
+++ b/string-list.c
@@ -195,16 +195,6 @@ void string_list_clear_func(struct string_list *list, string_list_clear_func_t c
list->nr = list->alloc = 0;
}
-
-void print_string_list(const struct string_list *p, const char *text)
-{
- int i;
- if ( text )
- printf("%s\n", text);
- for (i = 0; i < p->nr; i++)
- printf("%s:%p\n", p->items[i].string, p->items[i].util);
-}
-
struct string_list_item *string_list_append_nodup(struct string_list *list,
char *string)
{
diff --git a/string-list.h b/string-list.h
index ff8f6094a3..18c718c12c 100644
--- a/string-list.h
+++ b/string-list.h
@@ -114,14 +114,6 @@ void filter_string_list(struct string_list *list, int free_util,
string_list_each_func_t want, void *cb_data);
/**
- * Dump a string_list to stdout, useful mainly for debugging
- * purposes. It can take an optional header argument and it writes out
- * the string-pointer pairs of the string_list, each one in its own
- * line.
- */
-void print_string_list(const struct string_list *p, const char *text);
-
-/**
* Free a string_list. The `string` pointer of the items will be freed
* in case the `strdup_strings` member of the string_list is set. The
* second parameter controls if the `util` pointer of the items should
diff --git a/submodule-config.c b/submodule-config.c
index fc2c41b947..e04ba756d9 100644
--- a/submodule-config.c
+++ b/submodule-config.c
@@ -45,7 +45,7 @@ static int config_path_cmp(const void *unused_cmp_data,
const struct submodule_entry *b = entry_or_key;
return strcmp(a->config->path, b->config->path) ||
- oidcmp(&a->config->gitmodules_oid, &b->config->gitmodules_oid);
+ !oideq(&a->config->gitmodules_oid, &b->config->gitmodules_oid);
}
static int config_name_cmp(const void *unused_cmp_data,
@@ -57,7 +57,7 @@ static int config_name_cmp(const void *unused_cmp_data,
const struct submodule_entry *b = entry_or_key;
return strcmp(a->config->name, b->config->name) ||
- oidcmp(&a->config->gitmodules_oid, &b->config->gitmodules_oid);
+ !oideq(&a->config->gitmodules_oid, &b->config->gitmodules_oid);
}
static struct submodule_cache *submodule_cache_alloc(void)
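
The two hashmap comparators show why !oideq() (rather than oideq()) belongs in these expressions: a hashmap comparison function must return 0 for "equal" and non-zero otherwise, so each field contributes a non-zero value only when it differs. A tiny standalone version of that convention (struct and names invented for the sketch):

    #include <stdio.h>
    #include <string.h>

    struct entry {
        const char *path;
        const char *oid_hex;
    };

    /*
     * hashmap-style comparison: 0 only when every field matches,
     * so individual inequalities can simply be OR-ed together.
     */
    static int entry_cmp(const struct entry *a, const struct entry *b)
    {
        return strcmp(a->path, b->path) ||
               strcmp(a->oid_hex, b->oid_hex);
    }

    int main(void)
    {
        struct entry a = { "lib", "1234" };
        struct entry b = { "lib", "1234" };
        struct entry c = { "lib", "abcd" };

        printf("a vs b: %d\n", entry_cmp(&a, &b));    /* 0: equal */
        printf("a vs c: %d\n", entry_cmp(&a, &c));    /* non-zero: different */
        return 0;
    }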
diff --git a/submodule.c b/submodule.c
index a2b266fbfa..b53cb6e9c4 100644
--- a/submodule.c
+++ b/submodule.c
@@ -22,6 +22,7 @@
#include "worktree.h"
#include "parse-options.h"
#include "object-store.h"
+#include "commit-reach.h"
static int config_update_recurse_submodules = RECURSE_SUBMODULES_OFF;
static struct string_list changed_submodule_names = STRING_LIST_INIT_DUP;
@@ -65,8 +66,7 @@ int is_staging_gitmodules_ok(struct index_state *istate)
if ((pos >= 0) && (pos < istate->cache_nr)) {
struct stat st;
if (lstat(GITMODULES_FILE, &st) == 0 &&
- ie_match_stat(istate, istate->cache[pos], &st,
- CE_MATCH_IGNORE_FSMONITOR) & DATA_CHANGED)
+ ie_match_stat(istate, istate->cache[pos], &st, 0) & DATA_CHANGED)
return 0;
}
@@ -536,7 +536,7 @@ static void show_submodule_header(struct diff_options *o, const char *path,
fast_backward = 1;
}
- if (!oidcmp(one, two)) {
+ if (oideq(one, two)) {
strbuf_release(&sb);
return;
}
diff --git a/t/README b/t/README
index 9028b47d92..3ea6c85460 100644
--- a/t/README
+++ b/t/README
@@ -319,6 +319,14 @@ GIT_TEST_OE_DELTA_SIZE=<n> exercises the uncommon pack-objects code
path where deltas larger than this limit require extra memory
allocation for bookkeeping.
+GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES=<boolean> checks that cache-tree
+records are valid when the index is written out or after a merge. This
+is mostly to catch missing invalidation. Default is true.
+
+GIT_TEST_COMMIT_GRAPH=<boolean>, when true, forces the commit-graph to
+be written after every 'git commit' command, and overrides the
+'core.commitGraph' setting to true.
+
Naming Tests
------------
diff --git a/t/helper/test-delta.c b/t/helper/test-delta.c
index 34c7259248..e749a49c88 100644
--- a/t/helper/test-delta.c
+++ b/t/helper/test-delta.c
@@ -34,8 +34,8 @@ int cmd__delta(int argc, const char **argv)
return 1;
}
from_size = st.st_size;
- from_buf = mmap(NULL, from_size, PROT_READ, MAP_PRIVATE, fd, 0);
- if (from_buf == MAP_FAILED) {
+ from_buf = xmalloc(from_size);
+ if (read_in_full(fd, from_buf, from_size) < 0) {
perror(argv[2]);
close(fd);
return 1;
@@ -48,8 +48,8 @@ int cmd__delta(int argc, const char **argv)
return 1;
}
data_size = st.st_size;
- data_buf = mmap(NULL, data_size, PROT_READ, MAP_PRIVATE, fd, 0);
- if (data_buf == MAP_FAILED) {
+ data_buf = xmalloc(data_size);
+ if (read_in_full(fd, data_buf, data_size) < 0) {
perror(argv[3]);
close(fd);
return 1;
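
The helper now slurps its inputs with xmalloc() and read_in_full() instead of mmap(), avoiding platform-specific mmap quirks in what is only a test tool. A standalone rendition of that read-everything loop with plain POSIX calls (read_all() here is a local stand-in, not git's read_in_full()):

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/stat.h>
    #include <unistd.h>

    /* read exactly "len" bytes, retrying on short reads; <0 on error */
    static ssize_t read_all(int fd, void *buf, size_t len)
    {
        char *p = buf;
        size_t done = 0;

        while (done < len) {
            ssize_t n = read(fd, p + done, len - done);
            if (n < 0)
                return -1;
            if (n == 0)
                break;    /* unexpected EOF */
            done += n;
        }
        return done;
    }

    int main(int argc, char **argv)
    {
        struct stat st;
        int fd;
        char *buf;

        if (argc != 2)
            return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0 || fstat(fd, &st) < 0)
            return 1;
        buf = malloc(st.st_size);
        if (!buf || read_all(fd, buf, st.st_size) != st.st_size) {
            perror(argv[1]);
            close(fd);
            return 1;
        }
        printf("read %ld bytes\n", (long)st.st_size);
        free(buf);
        close(fd);
        return 0;
    }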
diff --git a/t/helper/test-dump-cache-tree.c b/t/helper/test-dump-cache-tree.c
index 98a4891f1d..6a3f88f5f5 100644
--- a/t/helper/test-dump-cache-tree.c
+++ b/t/helper/test-dump-cache-tree.c
@@ -33,7 +33,7 @@ static int dump_cache_tree(struct cache_tree *it,
}
else {
dump_one(it, pfx, "");
- if (oidcmp(&it->oid, &ref->oid) ||
+ if (!oideq(&it->oid, &ref->oid) ||
ref->entry_count != it->entry_count ||
ref->subtree_nr != it->subtree_nr) {
/* claims to be valid but is lying */
diff --git a/t/helper/test-reach.c b/t/helper/test-reach.c
new file mode 100644
index 0000000000..08d2ea68e8
--- /dev/null
+++ b/t/helper/test-reach.c
@@ -0,0 +1,142 @@
+#include "test-tool.h"
+#include "cache.h"
+#include "commit.h"
+#include "commit-reach.h"
+#include "config.h"
+#include "parse-options.h"
+#include "ref-filter.h"
+#include "string-list.h"
+#include "tag.h"
+
+static void print_sorted_commit_ids(struct commit_list *list)
+{
+ int i;
+ struct string_list s = STRING_LIST_INIT_DUP;
+
+ while (list) {
+ string_list_append(&s, oid_to_hex(&list->item->object.oid));
+ list = list->next;
+ }
+
+ string_list_sort(&s);
+
+ for (i = 0; i < s.nr; i++)
+ printf("%s\n", s.items[i].string);
+
+ string_list_clear(&s, 0);
+}
+
+int cmd__reach(int ac, const char **av)
+{
+ struct object_id oid_A, oid_B;
+ struct commit *A, *B;
+ struct commit_list *X, *Y;
+ struct object_array X_obj = OBJECT_ARRAY_INIT;
+ struct commit **X_array;
+ int X_nr, X_alloc;
+ struct strbuf buf = STRBUF_INIT;
+ struct repository *r = the_repository;
+
+ setup_git_directory();
+
+ if (ac < 2)
+ exit(1);
+
+ A = B = NULL;
+ X = Y = NULL;
+ X_nr = 0;
+ X_alloc = 16;
+ ALLOC_ARRAY(X_array, X_alloc);
+
+ while (strbuf_getline(&buf, stdin) != EOF) {
+ struct object_id oid;
+ struct object *orig;
+ struct object *peeled;
+ struct commit *c;
+ if (buf.len < 3)
+ continue;
+
+ if (get_oid_committish(buf.buf + 2, &oid))
+ die("failed to resolve %s", buf.buf + 2);
+
+ orig = parse_object(r, &oid);
+ peeled = deref_tag_noverify(orig);
+
+ if (!peeled)
+ die("failed to load commit for input %s resulting in oid %s\n",
+ buf.buf, oid_to_hex(&oid));
+
+ c = object_as_type(r, peeled, OBJ_COMMIT, 0);
+
+ if (!c)
+ die("failed to load commit for input %s resulting in oid %s\n",
+ buf.buf, oid_to_hex(&oid));
+
+ switch (buf.buf[0]) {
+ case 'A':
+ oidcpy(&oid_A, &oid);
+ A = c;
+ break;
+
+ case 'B':
+ oidcpy(&oid_B, &oid);
+ B = c;
+ break;
+
+ case 'X':
+ commit_list_insert(c, &X);
+ ALLOC_GROW(X_array, X_nr + 1, X_alloc);
+ X_array[X_nr++] = c;
+ add_object_array(orig, NULL, &X_obj);
+ break;
+
+ case 'Y':
+ commit_list_insert(c, &Y);
+ break;
+
+ default:
+ die("unexpected start of line: %c", buf.buf[0]);
+ }
+ }
+ strbuf_release(&buf);
+
+ if (!strcmp(av[1], "ref_newer"))
+ printf("%s(A,B):%d\n", av[1], ref_newer(&oid_A, &oid_B));
+ else if (!strcmp(av[1], "in_merge_bases"))
+ printf("%s(A,B):%d\n", av[1], in_merge_bases(A, B));
+ else if (!strcmp(av[1], "is_descendant_of"))
+ printf("%s(A,X):%d\n", av[1], is_descendant_of(A, X));
+ else if (!strcmp(av[1], "get_merge_bases_many")) {
+ struct commit_list *list = get_merge_bases_many(A, X_nr, X_array);
+ printf("%s(A,X):\n", av[1]);
+ print_sorted_commit_ids(list);
+ } else if (!strcmp(av[1], "reduce_heads")) {
+ struct commit_list *list = reduce_heads(X);
+ printf("%s(X):\n", av[1]);
+ print_sorted_commit_ids(list);
+ } else if (!strcmp(av[1], "can_all_from_reach")) {
+ printf("%s(X,Y):%d\n", av[1], can_all_from_reach(X, Y, 1));
+ } else if (!strcmp(av[1], "can_all_from_reach_with_flag")) {
+ struct commit_list *iter = Y;
+
+ while (iter) {
+ iter->item->object.flags |= 2;
+ iter = iter->next;
+ }
+
+ printf("%s(X,_,_,0,0):%d\n", av[1], can_all_from_reach_with_flag(&X_obj, 2, 4, 0, 0));
+ } else if (!strcmp(av[1], "commit_contains")) {
+ struct ref_filter filter;
+ struct contains_cache cache;
+ init_contains_cache(&cache);
+
+ if (ac > 2 && !strcmp(av[2], "--tag"))
+ filter.with_commit_tag_algo = 1;
+ else
+ filter.with_commit_tag_algo = 0;
+
+ printf("%s(_,A,X,_):%d\n", av[1], commit_contains(&filter, A, X, &cache));
+ }
+
+ exit(0);
+}
diff --git a/t/helper/test-read-midx.c b/t/helper/test-read-midx.c
new file mode 100644
index 0000000000..831b586d02
--- /dev/null
+++ b/t/helper/test-read-midx.c
@@ -0,0 +1,51 @@
+#include "test-tool.h"
+#include "cache.h"
+#include "midx.h"
+#include "repository.h"
+#include "object-store.h"
+
+static int read_midx_file(const char *object_dir)
+{
+ uint32_t i;
+ struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
+
+ if (!m)
+ return 1;
+
+ printf("header: %08x %d %d %d\n",
+ m->signature,
+ m->version,
+ m->num_chunks,
+ m->num_packs);
+
+ printf("chunks:");
+
+ if (m->chunk_pack_names)
+ printf(" pack-names");
+ if (m->chunk_oid_fanout)
+ printf(" oid-fanout");
+ if (m->chunk_oid_lookup)
+ printf(" oid-lookup");
+ if (m->chunk_object_offsets)
+ printf(" object-offsets");
+ if (m->chunk_large_offsets)
+ printf(" large-offsets");
+
+ printf("\nnum_objects: %d\n", m->num_objects);
+
+ printf("packs:\n");
+ for (i = 0; i < m->num_packs; i++)
+ printf("%s\n", m->pack_names[i]);
+
+ printf("object-dir: %s\n", m->object_dir);
+
+ return 0;
+}
+
+int cmd__read_midx(int argc, const char **argv)
+{
+ if (argc != 2)
+ usage("read-midx <object-dir>");
+
+ return read_midx_file(argv[1]);
+}
diff --git a/t/helper/test-tool.c b/t/helper/test-tool.c
index 0edafcfd65..b87a8c1f22 100644
--- a/t/helper/test-tool.c
+++ b/t/helper/test-tool.c
@@ -27,7 +27,9 @@ static struct test_cmd cmds[] = {
{ "online-cpus", cmd__online_cpus },
{ "path-utils", cmd__path_utils },
{ "prio-queue", cmd__prio_queue },
+ { "reach", cmd__reach },
{ "read-cache", cmd__read_cache },
+ { "read-midx", cmd__read_midx },
{ "ref-store", cmd__ref_store },
{ "regex", cmd__regex },
{ "repository", cmd__repository },
@@ -43,6 +45,9 @@ static struct test_cmd cmds[] = {
{ "subprocess", cmd__subprocess },
{ "urlmatch-normalization", cmd__urlmatch_normalization },
{ "wildmatch", cmd__wildmatch },
+#ifdef GIT_WINDOWS_NATIVE
+ { "windows-named-pipe", cmd__windows_named_pipe },
+#endif
{ "write-cache", cmd__write_cache },
};
diff --git a/t/helper/test-tool.h b/t/helper/test-tool.h
index e954e8c522..e074957279 100644
--- a/t/helper/test-tool.h
+++ b/t/helper/test-tool.h
@@ -23,7 +23,9 @@ int cmd__mktemp(int argc, const char **argv);
int cmd__online_cpus(int argc, const char **argv);
int cmd__path_utils(int argc, const char **argv);
int cmd__prio_queue(int argc, const char **argv);
+int cmd__reach(int argc, const char **argv);
int cmd__read_cache(int argc, const char **argv);
+int cmd__read_midx(int argc, const char **argv);
int cmd__ref_store(int argc, const char **argv);
int cmd__regex(int argc, const char **argv);
int cmd__repository(int argc, const char **argv);
@@ -39,6 +41,9 @@ int cmd__submodule_config(int argc, const char **argv);
int cmd__subprocess(int argc, const char **argv);
int cmd__urlmatch_normalization(int argc, const char **argv);
int cmd__wildmatch(int argc, const char **argv);
+#ifdef GIT_WINDOWS_NATIVE
+int cmd__windows_named_pipe(int argc, const char **argv);
+#endif
int cmd__write_cache(int argc, const char **argv);
#endif
diff --git a/t/helper/test-windows-named-pipe.c b/t/helper/test-windows-named-pipe.c
new file mode 100644
index 0000000000..b4b752b01a
--- /dev/null
+++ b/t/helper/test-windows-named-pipe.c
@@ -0,0 +1,72 @@
+#include "test-tool.h"
+#include "git-compat-util.h"
+#include "strbuf.h"
+
+#ifdef GIT_WINDOWS_NATIVE
+static const char *usage_string = "<pipe-filename>";
+
+#define TEST_BUFSIZE (4096)
+
+int cmd__windows_named_pipe(int argc, const char **argv)
+{
+ const char *filename;
+ struct strbuf pathname = STRBUF_INIT;
+ int err;
+ HANDLE h;
+ BOOL connected;
+ char buf[TEST_BUFSIZE + 1];
+
+ if (argc < 2)
+ goto print_usage;
+ filename = argv[1];
+ if (strchr(filename, '/') || strchr(filename, '\\'))
+ goto print_usage;
+ strbuf_addf(&pathname, "//./pipe/%s", filename);
+
+ /*
+ * Create a single instance of the server side of the named pipe.
+ * This will allow exactly one client instance to connect to it.
+ */
+ h = CreateNamedPipeA(
+ pathname.buf,
+ PIPE_ACCESS_INBOUND | FILE_FLAG_FIRST_PIPE_INSTANCE,
+ PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
+ PIPE_UNLIMITED_INSTANCES,
+ TEST_BUFSIZE, TEST_BUFSIZE, 0, NULL);
+ if (h == INVALID_HANDLE_VALUE) {
+ err = err_win_to_posix(GetLastError());
+ fprintf(stderr, "CreateNamedPipe failed: %s\n",
+ strerror(err));
+ return err;
+ }
+
+ connected = ConnectNamedPipe(h, NULL)
+ ? TRUE
+ : (GetLastError() == ERROR_PIPE_CONNECTED);
+ if (!connected) {
+ err = err_win_to_posix(GetLastError());
+ fprintf(stderr, "ConnectNamedPipe failed: %s\n",
+ strerror(err));
+ CloseHandle(h);
+ return err;
+ }
+
+ while (1) {
+ DWORD nbr;
+ BOOL success = ReadFile(h, buf, TEST_BUFSIZE, &nbr, NULL);
+ if (!success || nbr == 0)
+ break;
+ buf[nbr] = 0;
+
+ write(1, buf, nbr);
+ }
+
+ DisconnectNamedPipe(h);
+ CloseHandle(h);
+ return 0;
+
+print_usage:
+ fprintf(stderr, "usage: %s %s\n", argv[0], usage_string);
+ return 1;
+}
+#endif
diff --git a/t/perf/README b/t/perf/README
index 21321a0f36..be12090c38 100644
--- a/t/perf/README
+++ b/t/perf/README
@@ -168,3 +168,28 @@ that
While we have tried to make sure that it can cope with embedded
whitespace and other special characters, it will not work with
multi-line data.
+
+Rather than tracking the performance by run-time as `test_perf` does, you
+may also track output size by using `test_size`. The stdout of the
+function should be a single numeric value, which will be captured and
+shown in the aggregated output. For example:
+
+ test_perf 'time foo' '
+ ./foo >foo.out
+ '
+
+ 	test_size 'output size' '
+ wc -c <foo.out
+ '
+
+might produce output like:
+
+ Test origin HEAD
+ -------------------------------------------------------------
+ 1234.1 time foo 0.37(0.79+0.02) 0.26(0.51+0.02) -29.7%
+ 1234.2 output size 4.3M 3.6M -14.7%
+
+The item being measured (and its units) is up to the test; the context
+and the test title should make it clear to the user whether bigger or
+smaller numbers are better. Unlike test_perf, the test code will only be
+run once, since output sizes tend to be more deterministic than timings.
diff --git a/t/perf/aggregate.perl b/t/perf/aggregate.perl
index bc865160e7..494907a892 100755
--- a/t/perf/aggregate.perl
+++ b/t/perf/aggregate.perl
@@ -13,27 +13,42 @@ sub get_times {
my $line = <$fh>;
return undef if not defined $line;
close $fh or die "cannot close $name: $!";
- $line =~ /^(?:(\d+):)?(\d+):(\d+(?:\.\d+)?) (\d+(?:\.\d+)?) (\d+(?:\.\d+)?)$/
- or die "bad input line: $line";
- my $rt = ((defined $1 ? $1 : 0.0)*60+$2)*60+$3;
- return ($rt, $4, $5);
+ # times
+ if ($line =~ /^(?:(\d+):)?(\d+):(\d+(?:\.\d+)?) (\d+(?:\.\d+)?) (\d+(?:\.\d+)?)$/) {
+ my $rt = ((defined $1 ? $1 : 0.0)*60+$2)*60+$3;
+ return ($rt, $4, $5);
+ # size
+ } elsif ($line =~ /^\d+$/) {
+ return $&;
+ } else {
+ die "bad input line: $line";
+ }
+}
+
+sub relative_change {
+ my ($r, $firstr) = @_;
+ if ($firstr > 0) {
+ return sprintf "%+.1f%%", 100.0*($r-$firstr)/$firstr;
+ } elsif ($r == 0) {
+ return "=";
+ } else {
+ return "+inf";
+ }
}
sub format_times {
my ($r, $u, $s, $firstr) = @_;
+ # no value means we did not finish the test
if (!defined $r) {
return "<missing>";
}
- my $out = sprintf "%.2f(%.2f+%.2f)", $r, $u, $s;
- if (defined $firstr) {
- if ($firstr > 0) {
- $out .= sprintf " %+.1f%%", 100.0*($r-$firstr)/$firstr;
- } elsif ($r == 0) {
- $out .= " =";
- } else {
- $out .= " +inf";
- }
+ # a single value means we have a size, not times
+ if (!defined $u) {
+ return format_size($r, $firstr);
}
+ # otherwise, we have real/user/system times
+ my $out = sprintf "%.2f(%.2f+%.2f)", $r, $u, $s;
+ $out .= ' ' . relative_change($r, $firstr) if defined $firstr;
return $out;
}
@@ -51,6 +66,25 @@ EOT
exit(1);
}
+sub human_size {
+ my $n = shift;
+ my @units = ('', qw(K M G));
+ while ($n > 900 && @units > 1) {
+ $n /= 1000;
+ shift @units;
+ }
+ return $n unless length $units[0];
+ return sprintf '%.1f%s', $n, $units[0];
+}
+
+sub format_size {
+ my ($size, $first) = @_;
+ # match the width of a time: 0.00(0.00+0.00)
+ my $out = sprintf '%15s', human_size($size);
+ $out .= ' ' . relative_change($size, $first) if defined $first;
+ return $out;
+}
+
my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests,
$codespeed, $sortby, $subsection, $reponame);
@@ -181,7 +215,14 @@ sub print_default_results {
my $firstr;
for my $i (0..$#dirs) {
my $d = $dirs[$i];
- $times{$prefixes{$d}.$t} = [get_times("$resultsdir/$prefixes{$d}$t.times")];
+ my $base = "$resultsdir/$prefixes{$d}$t";
+ $times{$prefixes{$d}.$t} = [];
+ foreach my $type (qw(times size)) {
+ if (-e "$base.$type") {
+ $times{$prefixes{$d}.$t} = [get_times("$base.$type")];
+ last;
+ }
+ }
my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}};
my $w = length format_times($r,$u,$s,$firstr);
$colwidth[$i] = $w if $w > $colwidth[$i];
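
format_size() and relative_change() keep the new size column aligned with the existing time column and reuse the same percentage logic for both. The same arithmetic, transcribed into a standalone C snippet for clarity (function names invented; the Perl above remains the authoritative version):

    #include <stdio.h>

    /* "+N.N%" against the first column's value, "=" or "+inf" for zero baselines */
    static void print_relative_change(double value, double base)
    {
        if (base > 0)
            printf("%+.1f%%\n", 100.0 * (value - base) / base);
        else if (value == 0)
            printf("=\n");
        else
            printf("+inf\n");
    }

    /* scale by 1000 into K/M/G once the value passes 900, as human_size does */
    static void print_human_size(double n)
    {
        const char *units[] = { "", "K", "M", "G" };
        int i = 0;

        while (n > 900 && i < 3) {
            n /= 1000;
            i++;
        }
        if (i)
            printf("%.1f%s\n", n, units[i]);
        else
            printf("%g\n", n);
    }

    int main(void)
    {
        print_relative_change(3600000, 4300000);    /* prints -16.3% */
        print_human_size(4300000);                  /* prints 4.3M */
        return 0;
    }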
diff --git a/t/perf/p5311-pack-bitmaps-fetch.sh b/t/perf/p5311-pack-bitmaps-fetch.sh
new file mode 100755
index 0000000000..b04575951f
--- /dev/null
+++ b/t/perf/p5311-pack-bitmaps-fetch.sh
@@ -0,0 +1,45 @@
+#!/bin/sh
+
+test_description='performance of fetches from bitmapped packs'
+. ./perf-lib.sh
+
+test_perf_default_repo
+
+test_expect_success 'create bitmapped server repo' '
+ git config pack.writebitmaps true &&
+ git config pack.writebitmaphashcache true &&
+ git repack -ad
+'
+
+# simulate a fetch from a repository that last fetched N days ago, for
+# various values of N. We do so by following the first-parent chain,
+# and assume the first entry in the chain that is N days older than the current
+# HEAD is where the HEAD would have been then.
+for days in 1 2 4 8 16 32 64 128; do
+ title=$(printf '%10s' "($days days)")
+ test_expect_success "setup revs from $days days ago" '
+ now=$(git log -1 --format=%ct HEAD) &&
+ then=$(($now - ($days * 86400))) &&
+ tip=$(git rev-list -1 --first-parent --until=$then HEAD) &&
+ {
+ echo HEAD &&
+ echo ^$tip
+ } >revs
+ '
+
+ test_perf "server $title" '
+ git pack-objects --stdout --revs \
+ --thin --delta-base-offset \
+ <revs >tmp.pack
+ '
+
+ test_size "size $title" '
+ wc -c <tmp.pack
+ '
+
+ test_perf "client $title" '
+ git index-pack --stdin --fix-thin <tmp.pack
+ '
+done
+
+test_done
diff --git a/t/perf/perf-lib.sh b/t/perf/perf-lib.sh
index e4c343a6b7..11d1922cf5 100644
--- a/t/perf/perf-lib.sh
+++ b/t/perf/perf-lib.sh
@@ -179,8 +179,8 @@ exit $ret' >&3 2>&4
return "$eval_ret"
}
-
-test_perf () {
+test_wrapper_ () {
+ test_wrapper_func_=$1; shift
test_start_
test "$#" = 3 && { test_prereq=$1; shift; } || test_prereq=
test "$#" = 2 ||
@@ -191,35 +191,57 @@ test_perf () {
base=$(basename "$0" .sh)
echo "$test_count" >>"$perf_results_dir"/$base.subtests
echo "$1" >"$perf_results_dir"/$base.$test_count.descr
- if test -z "$verbose"; then
- printf "%s" "perf $test_count - $1:"
- else
- echo "perf $test_count - $1:"
- fi
- for i in $(test_seq 1 $GIT_PERF_REPEAT_COUNT); do
- say >&3 "running: $2"
- if test_run_perf_ "$2"
- then
- if test -z "$verbose"; then
- printf " %s" "$i"
- else
- echo "* timing run $i/$GIT_PERF_REPEAT_COUNT:"
- fi
+ base="$perf_results_dir"/"$perf_results_prefix$(basename "$0" .sh)"."$test_count"
+ "$test_wrapper_func_" "$@"
+ fi
+
+ test_finish_
+}
+
+test_perf_ () {
+ if test -z "$verbose"; then
+ printf "%s" "perf $test_count - $1:"
+ else
+ echo "perf $test_count - $1:"
+ fi
+ for i in $(test_seq 1 $GIT_PERF_REPEAT_COUNT); do
+ say >&3 "running: $2"
+ if test_run_perf_ "$2"
+ then
+ if test -z "$verbose"; then
+ printf " %s" "$i"
else
- test -z "$verbose" && echo
- test_failure_ "$@"
- break
+ echo "* timing run $i/$GIT_PERF_REPEAT_COUNT:"
fi
- done
- if test -z "$verbose"; then
- echo " ok"
else
- test_ok_ "$1"
+ test -z "$verbose" && echo
+ test_failure_ "$@"
+ break
fi
- base="$perf_results_dir"/"$perf_results_prefix$(basename "$0" .sh)"."$test_count"
- "$TEST_DIRECTORY"/perf/min_time.perl test_time.* >"$base".times
+ done
+ if test -z "$verbose"; then
+ echo " ok"
+ else
+ test_ok_ "$1"
fi
- test_finish_
+ "$TEST_DIRECTORY"/perf/min_time.perl test_time.* >"$base".times
+}
+
+test_perf () {
+ test_wrapper_ test_perf_ "$@"
+}
+
+test_size_ () {
+ say >&3 "running: $2"
+ if test_eval_ "$2" 3>"$base".size; then
+ test_ok_ "$1"
+ else
+ test_failure_ "$@"
+ fi
+}
+
+test_size () {
+ test_wrapper_ test_size_ "$@"
}
# We extend test_done to print timings at the end (./run disables this
diff --git a/t/t0051-windows-named-pipe.sh b/t/t0051-windows-named-pipe.sh
new file mode 100755
index 0000000000..10ac92d225
--- /dev/null
+++ b/t/t0051-windows-named-pipe.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+test_description='Windows named pipes'
+
+. ./test-lib.sh
+
+test_expect_success MINGW 'o_append write to named pipe' '
+ GIT_TRACE="$(pwd)/expect" git status >/dev/null 2>&1 &&
+ { test-tool windows-named-pipe t0051 >actual 2>&1 & } &&
+ pid=$! &&
+ sleep 1 &&
+ GIT_TRACE=//./pipe/t0051 git status >/dev/null 2>warning &&
+ wait $pid &&
+ test_cmp expect actual
+'
+
+test_done
diff --git a/t/t0090-cache-tree.sh b/t/t0090-cache-tree.sh
index 7de40141ca..94fcb4a78e 100755
--- a/t/t0090-cache-tree.sh
+++ b/t/t0090-cache-tree.sh
@@ -161,6 +161,24 @@ test_expect_success PERL 'commit --interactive gives cache-tree on partial commi
test_cache_tree
'
+test_expect_success PERL 'commit -p with shrinking cache-tree' '
+ mkdir -p deep/subdir &&
+ echo content >deep/subdir/file &&
+ git add deep &&
+ git commit -m add &&
+ git rm -r deep &&
+
+ before=$(wc -c <.git/index) &&
+ git commit -m delete -p &&
+ after=$(wc -c <.git/index) &&
+
+ # double check that the index shrank
+ test $before -gt $after &&
+
+ # and that our index was not corrupted
+ git fsck
+'
+
test_expect_success 'commit in child dir has cache-tree' '
mkdir dir &&
>dir/child.t &&
diff --git a/t/t0410-partial-clone.sh b/t/t0410-partial-clone.sh
index 0ab02c337d..cfd0655ea1 100755
--- a/t/t0410-partial-clone.sh
+++ b/t/t0410-partial-clone.sh
@@ -193,7 +193,7 @@ test_expect_success 'rev-list stops traversal at missing and promised commit' '
git -C repo config core.repositoryformatversion 1 &&
git -C repo config extensions.partialclone "arbitrary string" &&
- git -C repo rev-list --exclude-promisor-objects --objects bar >out &&
+ GIT_TEST_COMMIT_GRAPH=0 git -C repo rev-list --exclude-promisor-objects --objects bar >out &&
grep $(git -C repo rev-parse bar) out &&
! grep $FOO out
'
diff --git a/t/t1090-sparse-checkout-scope.sh b/t/t1090-sparse-checkout-scope.sh
index 1f61eb3e88..25d7c700f6 100755
--- a/t/t1090-sparse-checkout-scope.sh
+++ b/t/t1090-sparse-checkout-scope.sh
@@ -31,6 +31,20 @@ test_expect_success 'perform sparse checkout of master' '
test_path_is_file c
'
+test_expect_success 'checkout -b checkout.optimizeNewBranch interaction' '
+ cp .git/info/sparse-checkout .git/info/sparse-checkout.bak &&
+ test_when_finished "
+ mv -f .git/info/sparse-checkout.bak .git/info/sparse-checkout
+ git checkout master
+ " &&
+ echo "/b" >>.git/info/sparse-checkout &&
+ test "$(git ls-files -t b)" = "S b" &&
+ git -c checkout.optimizeNewBranch=true checkout -b fast &&
+ test "$(git ls-files -t b)" = "S b" &&
+ git checkout -b slow &&
+ test "$(git ls-files -t b)" = "H b"
+'
+
test_expect_success 'merge feature branch into sparse checkout of master' '
git merge feature &&
test_path_is_file a &&
diff --git a/t/t1404-update-ref-errors.sh b/t/t1404-update-ref-errors.sh
index 2a42a589a4..51a4f4c0ac 100755
--- a/t/t1404-update-ref-errors.sh
+++ b/t/t1404-update-ref-errors.sh
@@ -559,9 +559,9 @@ test_expect_success 'no bogus intermediate values during delete' '
{
# Note: the following command is intentionally run in the
# background. We increase the timeout so that `update-ref`
- # attempts to acquire the `packed-refs` lock for longer than
- # it takes for us to do the check then delete it:
- git -c core.packedrefstimeout=3000 update-ref -d $prefix/foo &
+ # attempts to acquire the `packed-refs` lock for much longer
+ # than it takes for us to do the check then delete it:
+ git -c core.packedrefstimeout=30000 update-ref -d $prefix/foo &
} &&
pid2=$! &&
# Give update-ref plenty of time to get to the point where it tries
diff --git a/t/t2025-worktree-add.sh b/t/t2025-worktree-add.sh
index 07d292317c..286bba35d8 100755
--- a/t/t2025-worktree-add.sh
+++ b/t/t2025-worktree-add.sh
@@ -552,4 +552,22 @@ test_expect_success '"add" in bare repo invokes post-checkout hook' '
test_cmp hook.expect goozy/hook.actual
'
+test_expect_success '"add" an existing but missing worktree' '
+ git worktree add --detach pneu &&
+ test_must_fail git worktree add --detach pneu &&
+ rm -fr pneu &&
+ test_must_fail git worktree add --detach pneu &&
+ git worktree add --force --detach pneu
+'
+
+test_expect_success '"add" an existing locked but missing worktree' '
+ git worktree add --detach gnoo &&
+ git worktree lock gnoo &&
+ test_when_finished "git worktree unlock gnoo || :" &&
+ rm -fr gnoo &&
+ test_must_fail git worktree add --detach gnoo &&
+ test_must_fail git worktree add --force --detach gnoo &&
+ git worktree add --force --force --detach gnoo
+'
+
test_done
diff --git a/t/t2028-worktree-move.sh b/t/t2028-worktree-move.sh
index 5f7d45b7b7..33c0337733 100755
--- a/t/t2028-worktree-move.sh
+++ b/t/t2028-worktree-move.sh
@@ -98,6 +98,20 @@ test_expect_success 'move worktree to another dir' '
test_cmp expected2 actual2
'
+test_expect_success 'move locked worktree (force)' '
+ test_when_finished "
+ git worktree unlock flump || :
+ git worktree remove flump || :
+ git worktree unlock ploof || :
+ git worktree remove ploof || :
+ " &&
+ git worktree add --detach flump &&
+ git worktree lock flump &&
+	test_must_fail git worktree move flump ploof &&
+	test_must_fail git worktree move --force flump ploof &&
+ git worktree move --force --force flump ploof
+'
+
test_expect_success 'remove main worktree' '
test_must_fail git worktree remove .
'
@@ -141,4 +155,34 @@ test_expect_success 'NOT remove missing-but-locked worktree' '
test_path_is_dir .git/worktrees/gone-but-locked
'
+test_expect_success 'proper error when worktree not found' '
+ for i in noodle noodle/bork
+ do
+ test_must_fail git worktree lock $i 2>err &&
+ test_i18ngrep "not a working tree" err || return 1
+ done
+'
+
+test_expect_success 'remove locked worktree (force)' '
+ git worktree add --detach gumby &&
+ test_when_finished "git worktree remove gumby || :" &&
+ git worktree lock gumby &&
+ test_when_finished "git worktree unlock gumby || :" &&
+ test_must_fail git worktree remove gumby &&
+ test_must_fail git worktree remove --force gumby &&
+ git worktree remove --force --force gumby
+'
+
+test_expect_success 'remove cleans up .git/worktrees when empty' '
+ git init moog &&
+ (
+ cd moog &&
+ test_commit bim &&
+ git worktree add --detach goom &&
+ test_path_exists .git/worktrees &&
+ git worktree remove goom &&
+ test_path_is_missing .git/worktrees
+ )
+'
+
test_done
diff --git a/t/t3206-range-diff.sh b/t/t3206-range-diff.sh
index 2237c7f4af..d481f29259 100755
--- a/t/t3206-range-diff.sh
+++ b/t/t3206-range-diff.sh
@@ -133,13 +133,69 @@ test_expect_success 'changed message' '
Z
+ Also a silly comment here!
+
- Zdiff --git a/file b/file
- Z--- a/file
- Z+++ b/file
+ Z diff --git a/file b/file
+ Z --- a/file
+ Z +++ b/file
3: 147e64e = 3: b9cb956 s/11/B/
4: a63e992 = 4: 8add5f1 s/12/B/
EOF
test_cmp expected actual
'
+test_expect_success 'dual-coloring' '
+ sed -e "s|^:||" >expect <<-\EOF &&
+ :<YELLOW>1: a4b3333 = 1: f686024 s/5/A/<RESET>
+ :<RED>2: f51d370 <RESET><YELLOW>!<RESET><GREEN> 2: 4ab067d<RESET><YELLOW> s/4/A/<RESET>
+ : <REVERSE><CYAN>@@ -2,6 +2,8 @@<RESET>
+ : <RESET>
+ : s/4/A/<RESET>
+ : <RESET>
+ : <REVERSE><GREEN>+<RESET><BOLD> Also a silly comment here!<RESET>
+ : <REVERSE><GREEN>+<RESET>
+ : diff --git a/file b/file<RESET>
+ : --- a/file<RESET>
+ : +++ b/file<RESET>
+ :<RED>3: 0559556 <RESET><YELLOW>!<RESET><GREEN> 3: b9cb956<RESET><YELLOW> s/11/B/<RESET>
+ : <REVERSE><CYAN>@@ -10,7 +10,7 @@<RESET>
+ : 9<RESET>
+ : 10<RESET>
+ : <RED> -11<RESET>
+ : <REVERSE><RED>-<RESET><FAINT;GREEN>+BB<RESET>
+ : <REVERSE><GREEN>+<RESET><BOLD;GREEN>+B<RESET>
+ : 12<RESET>
+ : 13<RESET>
+ : 14<RESET>
+ :<RED>4: d966c5c <RESET><YELLOW>!<RESET><GREEN> 4: 8add5f1<RESET><YELLOW> s/12/B/<RESET>
+ : <REVERSE><CYAN>@@ -8,7 +8,7 @@<RESET>
+ : <CYAN> @@<RESET>
+ : 9<RESET>
+ : 10<RESET>
+ : <REVERSE><RED>-<RESET><FAINT> BB<RESET>
+ : <REVERSE><GREEN>+<RESET><BOLD> B<RESET>
+ : <RED> -12<RESET>
+ : <GREEN> +B<RESET>
+ : 13<RESET>
+ EOF
+ git range-diff changed...changed-message --color --dual-color >actual.raw &&
+ test_decode_color >actual <actual.raw &&
+ test_cmp expect actual
+'
+
+for prev in topic master..topic
+do
+ test_expect_success "format-patch --range-diff=$prev" '
+ git format-patch --stdout --cover-letter --range-diff=$prev \
+ master..unmodified >actual &&
+ grep "= 1: .* s/5/A" actual &&
+ grep "= 2: .* s/4/A" actual &&
+ grep "= 3: .* s/11/B" actual &&
+ grep "= 4: .* s/12/B" actual
+ '
+done
+
+test_expect_success 'format-patch --range-diff as commentary' '
+ git format-patch --stdout --range-diff=HEAD~1 HEAD~1 >actual &&
+ test_i18ngrep "^Range-diff:$" actual
+'
+
test_done
diff --git a/t/t3404-rebase-interactive.sh b/t/t3404-rebase-interactive.sh
index 86bba5ed7c..ff89b6341a 100755
--- a/t/t3404-rebase-interactive.sh
+++ b/t/t3404-rebase-interactive.sh
@@ -569,16 +569,15 @@ test_expect_success '--continue tries to commit, even for "edit"' '
'
test_expect_success 'aborted --continue does not squash commits after "edit"' '
- test_when_finished "git rebase --abort" &&
old=$(git rev-parse HEAD) &&
test_tick &&
set_fake_editor &&
FAKE_LINES="edit 1" git rebase -i HEAD^ &&
echo "edited again" > file7 &&
git add file7 &&
- echo all the things >>conflict &&
- test_must_fail git rebase --continue &&
- test $old = $(git rev-parse HEAD)
+ test_must_fail env FAKE_COMMIT_MESSAGE=" " git rebase --continue &&
+ test $old = $(git rev-parse HEAD) &&
+ git rebase --abort
'
test_expect_success 'auto-amend only edited commits after "edit"' '
diff --git a/t/t3405-rebase-malformed.sh b/t/t3405-rebase-malformed.sh
index da94dddc86..860e63e444 100755
--- a/t/t3405-rebase-malformed.sh
+++ b/t/t3405-rebase-malformed.sh
@@ -83,7 +83,7 @@ test_expect_success 'rebase -m commit with empty message' '
test_expect_success 'rebase -i commit with empty message' '
git checkout diff-in-message &&
set_fake_editor &&
- env FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \
+ test_must_fail env FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \
git rebase -i HEAD^
'
diff --git a/t/t3415-rebase-autosquash.sh b/t/t3415-rebase-autosquash.sh
index e364c12622..13f5688135 100755
--- a/t/t3415-rebase-autosquash.sh
+++ b/t/t3415-rebase-autosquash.sh
@@ -330,4 +330,23 @@ test_expect_success 'wrapped original subject' '
test $base = $parent
'
+test_expect_success 'abort last squash' '
+ test_when_finished "test_might_fail git rebase --abort" &&
+ test_when_finished "git checkout master" &&
+
+ git checkout -b some-squashes &&
+ git commit --allow-empty -m first &&
+ git commit --allow-empty --squash HEAD &&
+ git commit --allow-empty -m second &&
+ git commit --allow-empty --squash HEAD &&
+
+ test_must_fail git -c core.editor="grep -q ^pick" \
+ rebase -ki --autosquash HEAD~4 &&
+ : do not finish the squash, but resolve it manually &&
+ git commit --allow-empty --amend -m edited-first &&
+ git rebase --skip &&
+ git show >actual &&
+ ! grep first actual
+'
+
test_done
diff --git a/t/t3505-cherry-pick-empty.sh b/t/t3505-cherry-pick-empty.sh
index fbdc47cfbd..5f911bb529 100755
--- a/t/t3505-cherry-pick-empty.sh
+++ b/t/t3505-cherry-pick-empty.sh
@@ -11,17 +11,14 @@ test_expect_success setup '
test_tick &&
git commit -m "first" &&
- git checkout -b empty-branch &&
- test_tick &&
- git commit --allow-empty -m "empty" &&
-
+ git checkout -b empty-message-branch &&
echo third >> file1 &&
git add file1 &&
test_tick &&
git commit --allow-empty-message -m "" &&
git checkout master &&
- git checkout -b empty-branch2 &&
+ git checkout -b empty-change-branch &&
test_tick &&
git commit --allow-empty -m "empty"
@@ -29,7 +26,7 @@ test_expect_success setup '
test_expect_success 'cherry-pick an empty commit' '
git checkout master &&
- test_expect_code 1 git cherry-pick empty-branch^
+ test_expect_code 1 git cherry-pick empty-change-branch
'
test_expect_success 'index lockfile was removed' '
@@ -37,8 +34,9 @@ test_expect_success 'index lockfile was removed' '
'
test_expect_success 'cherry-pick a commit with an empty message' '
+ test_when_finished "git reset --hard empty-message-branch~1" &&
git checkout master &&
- test_expect_code 1 git cherry-pick empty-branch
+ git cherry-pick empty-message-branch
'
test_expect_success 'index lockfile was removed' '
@@ -47,7 +45,7 @@ test_expect_success 'index lockfile was removed' '
test_expect_success 'cherry-pick a commit with an empty message with --allow-empty-message' '
git checkout -f master &&
- git cherry-pick --allow-empty-message empty-branch
+ git cherry-pick --allow-empty-message empty-message-branch
'
test_expect_success 'cherry pick an empty non-ff commit without --allow-empty' '
@@ -55,12 +53,12 @@ test_expect_success 'cherry pick an empty non-ff commit without --allow-empty' '
echo fourth >>file2 &&
git add file2 &&
git commit -m "fourth" &&
- test_must_fail git cherry-pick empty-branch2
+ test_must_fail git cherry-pick empty-change-branch
'
test_expect_success 'cherry pick an empty non-ff commit with --allow-empty' '
git checkout master &&
- git cherry-pick --allow-empty empty-branch2
+ git cherry-pick --allow-empty empty-change-branch
'
test_expect_success 'cherry pick with --keep-redundant-commits' '
diff --git a/t/t3701-add-interactive.sh b/t/t3701-add-interactive.sh
index 609fbfdc31..65dfbc033a 100755
--- a/t/t3701-add-interactive.sh
+++ b/t/t3701-add-interactive.sh
@@ -540,7 +540,7 @@ test_expect_success 'add -p does not expand argument lists' '
# update it, but we want to be sure that our "." pathspec
# was not expanded into the argument list of any command.
# So look only for "not-changed".
- ! grep not-changed trace.out
+ ! grep -E "^trace: (built-in|exec|run_command): .*not-changed" trace.out
'
test_expect_success 'hunk-editing handles custom comment char' '
diff --git a/t/t4014-format-patch.sh b/t/t4014-format-patch.sh
index 53880da7bb..909c743c13 100755
--- a/t/t4014-format-patch.sh
+++ b/t/t4014-format-patch.sh
@@ -1717,4 +1717,38 @@ test_expect_success 'format-patch --pretty=mboxrd' '
test_cmp expect actual
'
+test_expect_success 'interdiff: setup' '
+ git checkout -b boop master &&
+ test_commit fnorp blorp &&
+ test_commit fleep blorp
+'
+
+test_expect_success 'interdiff: cover-letter' '
+ sed "y/q/ /" >expect <<-\EOF &&
+ +fleep
+ --q
+ EOF
+ git format-patch --cover-letter --interdiff=boop~2 -1 boop &&
+ test_i18ngrep "^Interdiff:$" 0000-cover-letter.patch &&
+ test_i18ngrep ! "^Interdiff:$" 0001-fleep.patch &&
+ sed "1,/^@@ /d; /^-- $/q" <0000-cover-letter.patch >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'interdiff: reroll-count' '
+ git format-patch --cover-letter --interdiff=boop~2 -v2 -1 boop &&
+ test_i18ngrep "^Interdiff ..* v1:$" v2-0000-cover-letter.patch
+'
+
+test_expect_success 'interdiff: solo-patch' '
+ cat >expect <<-\EOF &&
+ +fleep
+
+ EOF
+ git format-patch --interdiff=boop~2 -1 boop &&
+ test_i18ngrep "^Interdiff:$" 0001-fleep.patch &&
+ sed "1,/^ @@ /d; /^$/q" <0001-fleep.patch >actual &&
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t4200-rerere.sh b/t/t4200-rerere.sh
index 65da74c766..55b7750ade 100755
--- a/t/t4200-rerere.sh
+++ b/t/t4200-rerere.sh
@@ -577,4 +577,98 @@ test_expect_success 'multiple identical conflicts' '
count_pre_post 0 0
'
+test_expect_success 'rerere with unexpected conflict markers does not crash' '
+ git reset --hard &&
+
+ git checkout -b branch-1 master &&
+ echo "bar" >test &&
+ git add test &&
+ git commit -q -m two &&
+
+ git reset --hard &&
+ git checkout -b branch-2 master &&
+ echo "foo" >test &&
+ git add test &&
+ git commit -q -a -m one &&
+
+ test_must_fail git merge branch-1 &&
+ echo "<<<<<<< a" >test &&
+ git rerere &&
+
+ git rerere clear
+'
+
+test_expect_success 'rerere with inner conflict markers' '
+ git reset --hard &&
+
+ git checkout -b A master &&
+ echo "bar" >test &&
+ git add test &&
+ git commit -q -m two &&
+ echo "baz" >test &&
+ git add test &&
+ git commit -q -m three &&
+
+ git reset --hard &&
+ git checkout -b B master &&
+ echo "foo" >test &&
+ git add test &&
+ git commit -q -a -m one &&
+
+ test_must_fail git merge A~ &&
+ git add test &&
+ git commit -q -m "will solve conflicts later" &&
+ test_must_fail git merge A &&
+
+ echo "resolved" >test &&
+ git add test &&
+ git commit -q -m "solved conflict" &&
+
+ echo "resolved" >expect &&
+
+ git reset --hard HEAD~~ &&
+ test_must_fail git merge A~ &&
+ git add test &&
+ git commit -q -m "will solve conflicts later" &&
+ test_must_fail git merge A &&
+ cat test >actual &&
+ test_cmp expect actual &&
+
+ git add test &&
+ git commit -m "rerere solved conflict" &&
+ git reset --hard HEAD~ &&
+ test_must_fail git merge A &&
+ cat test >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'setup simple stage 1 handling' '
+ test_create_repo stage_1_handling &&
+ (
+ cd stage_1_handling &&
+
+ test_seq 1 10 >original &&
+ git add original &&
+ git commit -m original &&
+
+ git checkout -b A master &&
+ git mv original A &&
+ git commit -m "rename to A" &&
+
+ git checkout -b B master &&
+ git mv original B &&
+ git commit -m "rename to B"
+ )
+'
+
+test_expect_success 'test simple stage 1 handling' '
+ (
+ cd stage_1_handling &&
+
+ git config rerere.enabled true &&
+ git checkout A^0 &&
+ test_must_fail git merge B^0
+ )
+'
+
test_done
diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
index 2052cadb11..978a8a66ff 100755
--- a/t/t4205-log-pretty-formats.sh
+++ b/t/t4205-log-pretty-formats.sh
@@ -598,4 +598,27 @@ test_expect_success ':only and :unfold work together' '
test_cmp expect actual
'
+test_expect_success 'trailer parsing not fooled by --- line' '
+ git commit --allow-empty -F - <<-\EOF &&
+ this is the subject
+
+ This is the body. The message has a "---" line which would confuse a
+ message+patch parser. But here we know we have only a commit message,
+ so we get it right.
+
+ trailer: wrong
+ ---
+ This is more body.
+
+ trailer: right
+ EOF
+
+ {
+ echo "trailer: right" &&
+ echo
+ } >expect &&
+ git log --no-walk --format="%(trailers)" >actual &&
+ test_cmp expect actual
+'
+
test_done
diff --git a/t/t4256-am-format-flowed.sh b/t/t4256-am-format-flowed.sh
new file mode 100755
index 0000000000..6340310e9a
--- /dev/null
+++ b/t/t4256-am-format-flowed.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+test_description='test format=flowed support of git am'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ cp "$TEST_DIRECTORY/t4256/1/mailinfo.c.orig" mailinfo.c &&
+ git add mailinfo.c &&
+ git commit -m initial
+'
+
+test_expect_success 'am with format=flowed' '
+ git am <"$TEST_DIRECTORY/t4256/1/patch" >stdout 2>stderr &&
+ test_i18ngrep "warning: Patch sent with format=flowed" stderr &&
+ test_cmp "$TEST_DIRECTORY/t4256/1/mailinfo.c" mailinfo.c
+'
+
+test_done
diff --git a/t/t4256/1/mailinfo.c b/t/t4256/1/mailinfo.c
new file mode 100644
index 0000000000..b395adbdf2
--- /dev/null
+++ b/t/t4256/1/mailinfo.c
@@ -0,0 +1,1245 @@
+#include "cache.h"
+#include "config.h"
+#include "utf8.h"
+#include "strbuf.h"
+#include "mailinfo.h"
+
+static void cleanup_space(struct strbuf *sb)
+{
+ size_t pos, cnt;
+ for (pos = 0; pos < sb->len; pos++) {
+ if (isspace(sb->buf[pos])) {
+ sb->buf[pos] = ' ';
+ for (cnt = 0; isspace(sb->buf[pos + cnt + 1]); cnt++);
+ strbuf_remove(sb, pos + 1, cnt);
+ }
+ }
+}
+
+static void get_sane_name(struct strbuf *out, struct strbuf *name, struct strbuf *email)
+{
+ struct strbuf *src = name;
+ if (name->len < 3 || 60 < name->len || strchr(name->buf, '@') ||
+ strchr(name->buf, '<') || strchr(name->buf, '>'))
+ src = email;
+ else if (name == out)
+ return;
+ strbuf_reset(out);
+ strbuf_addbuf(out, src);
+}
+
+static void parse_bogus_from(struct mailinfo *mi, const struct strbuf *line)
+{
+ /* John Doe <johndoe> */
+
+ char *bra, *ket;
+ /* This is fallback, so do not bother if we already have an
+ * e-mail address.
+ */
+ if (mi->email.len)
+ return;
+
+ bra = strchr(line->buf, '<');
+ if (!bra)
+ return;
+ ket = strchr(bra, '>');
+ if (!ket)
+ return;
+
+ strbuf_reset(&mi->email);
+ strbuf_add(&mi->email, bra + 1, ket - bra - 1);
+
+ strbuf_reset(&mi->name);
+ strbuf_add(&mi->name, line->buf, bra - line->buf);
+ strbuf_trim(&mi->name);
+ get_sane_name(&mi->name, &mi->name, &mi->email);
+}
+
+static const char *unquote_comment(struct strbuf *outbuf, const char *in)
+{
+ int c;
+ int take_next_literally = 0;
+
+ strbuf_addch(outbuf, '(');
+
+ while ((c = *in++) != 0) {
+ if (take_next_literally == 1) {
+ take_next_literally = 0;
+ } else {
+ switch (c) {
+ case '\\':
+ take_next_literally = 1;
+ continue;
+ case '(':
+ in = unquote_comment(outbuf, in);
+ continue;
+ case ')':
+ strbuf_addch(outbuf, ')');
+ return in;
+ }
+ }
+
+ strbuf_addch(outbuf, c);
+ }
+
+ return in;
+}
+
+static const char *unquote_quoted_string(struct strbuf *outbuf, const char *in)
+{
+ int c;
+ int take_next_literally = 0;
+
+ while ((c = *in++) != 0) {
+ if (take_next_literally == 1) {
+ take_next_literally = 0;
+ } else {
+ switch (c) {
+ case '\\':
+ take_next_literally = 1;
+ continue;
+ case '"':
+ return in;
+ }
+ }
+
+ strbuf_addch(outbuf, c);
+ }
+
+ return in;
+}
+
+static void unquote_quoted_pair(struct strbuf *line)
+{
+ struct strbuf outbuf;
+ const char *in = line->buf;
+ int c;
+
+ strbuf_init(&outbuf, line->len);
+
+ while ((c = *in++) != 0) {
+ switch (c) {
+ case '"':
+ in = unquote_quoted_string(&outbuf, in);
+ continue;
+ case '(':
+ in = unquote_comment(&outbuf, in);
+ continue;
+ }
+
+ strbuf_addch(&outbuf, c);
+ }
+
+ strbuf_swap(&outbuf, line);
+ strbuf_release(&outbuf);
+
+}
+
+static void handle_from(struct mailinfo *mi, const struct strbuf *from)
+{
+ char *at;
+ size_t el;
+ struct strbuf f;
+
+ strbuf_init(&f, from->len);
+ strbuf_addbuf(&f, from);
+
+ unquote_quoted_pair(&f);
+
+ at = strchr(f.buf, '@');
+ if (!at) {
+ parse_bogus_from(mi, from);
+ goto out;
+ }
+
+ /*
+ * If we already have one email, don't take any confusing lines
+ */
+ if (mi->email.len && strchr(at + 1, '@'))
+ goto out;
+
+ /* Pick up the string around '@', possibly delimited with <>
+ * pair; that is the email part.
+ */
+ while (at > f.buf) {
+ char c = at[-1];
+ if (isspace(c))
+ break;
+ if (c == '<') {
+ at[-1] = ' ';
+ break;
+ }
+ at--;
+ }
+ el = strcspn(at, " \n\t\r\v\f>");
+ strbuf_reset(&mi->email);
+ strbuf_add(&mi->email, at, el);
+ strbuf_remove(&f, at - f.buf, el + (at[el] ? 1 : 0));
+
+ /* The remainder is name. It could be
+ *
+ * - "John Doe <john.doe@xz>" (a), or
+ * - "john.doe@xz (John Doe)" (b), or
+ * - "John (zzz) Doe <john.doe@xz> (Comment)" (c)
+ *
+ * but we have removed the email part, so
+ *
+ * - remove extra spaces which could stay after email (case 'c'), and
+ * - trim from both ends, possibly removing the () pair at the end
+ * (cases 'a' and 'b').
+ */
+ cleanup_space(&f);
+ strbuf_trim(&f);
+ if (f.buf[0] == '(' && f.len && f.buf[f.len - 1] == ')') {
+ strbuf_remove(&f, 0, 1);
+ strbuf_setlen(&f, f.len - 1);
+ }
+
+ get_sane_name(&mi->name, &f, &mi->email);
+out:
+ strbuf_release(&f);
+}
+
+static void handle_header(struct strbuf **out, const struct strbuf *line)
+{
+ if (!*out) {
+ *out = xmalloc(sizeof(struct strbuf));
+ strbuf_init(*out, line->len);
+ } else
+ strbuf_reset(*out);
+
+ strbuf_addbuf(*out, line);
+}
+
+/* NOTE NOTE NOTE. We do not claim we do full MIME. We just attempt
+ * to have enough heuristics to grok MIME encoded patches often found
+ * on our mailing lists. For example, we do not even treat header lines
+ * case insensitively.
+ */
+
+static int slurp_attr(const char *line, const char *name, struct strbuf *attr)
+{
+ const char *ends, *ap = strcasestr(line, name);
+ size_t sz;
+
+ strbuf_setlen(attr, 0);
+ if (!ap)
+ return 0;
+ ap += strlen(name);
+ if (*ap == '"') {
+ ap++;
+ ends = "\"";
+ }
+ else
+ ends = "; \t";
+ sz = strcspn(ap, ends);
+ strbuf_add(attr, ap, sz);
+ return 1;
+}
+
+static int has_attr_value(const char *line, const char *name, const char *value)
+{
+ struct strbuf sb = STRBUF_INIT;
+ int rc = slurp_attr(line, name, &sb) && !strcasecmp(sb.buf, value);
+ strbuf_release(&sb);
+ return rc;
+}
+
+static void handle_content_type(struct mailinfo *mi, struct strbuf *line)
+{
+ struct strbuf *boundary = xmalloc(sizeof(struct strbuf));
+ strbuf_init(boundary, line->len);
+
+ mi->format_flowed = has_attr_value(line->buf, "format=", "flowed");
+ mi->delsp = has_attr_value(line->buf, "delsp=", "yes");
+
+ if (slurp_attr(line->buf, "boundary=", boundary)) {
+ strbuf_insert(boundary, 0, "--", 2);
+ if (++mi->content_top >= &mi->content[MAX_BOUNDARIES]) {
+ error("Too many boundaries to handle");
+ mi->input_error = -1;
+ mi->content_top = &mi->content[MAX_BOUNDARIES] - 1;
+ return;
+ }
+ *(mi->content_top) = boundary;
+ boundary = NULL;
+ }
+ slurp_attr(line->buf, "charset=", &mi->charset);
+
+ if (boundary) {
+ strbuf_release(boundary);
+ free(boundary);
+ }
+}
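
handle_content_type() above pulls format=, delsp=, boundary= and charset= out of the Content-Type value via slurp_attr()/has_attr_value(). A minimal standalone sketch of that attribute scan, for illustration only: get_attr() is a made-up helper, the header string is an example, and strcasestr() is assumed to be available (glibc needs _GNU_SOURCE; git carries its own compat version).

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <string.h>

	/* case-insensitive "name=" lookup; value is quoted or runs to ';'/space */
	static int get_attr(const char *line, const char *name,
			    char *out, size_t outsz)
	{
		const char *ap = strcasestr(line, name);
		size_t sz;

		if (!ap)
			return 0;
		ap += strlen(name);
		if (*ap == '"') {
			ap++;
			sz = strcspn(ap, "\"");
		} else {
			sz = strcspn(ap, "; \t");
		}
		if (sz >= outsz)
			sz = outsz - 1;
		memcpy(out, ap, sz);
		out[sz] = '\0';
		return 1;
	}

	int main(void)
	{
		const char *ct =
			"Content-Type: text/plain; charset=utf-8; format=flowed";
		char val[64];

		if (get_attr(ct, "format=", val, sizeof(val)))
			printf("format  -> %s\n", val);  /* flowed */
		if (get_attr(ct, "charset=", val, sizeof(val)))
			printf("charset -> %s\n", val);  /* utf-8 */
		return 0;
	}

has_attr_value() is essentially this lookup plus a case-insensitive compare against the expected value.
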
+
+static void handle_content_transfer_encoding(struct mailinfo *mi,
+ const struct strbuf *line)
+{
+ if (strcasestr(line->buf, "base64"))
+ mi->transfer_encoding = TE_BASE64;
+ else if (strcasestr(line->buf, "quoted-printable"))
+ mi->transfer_encoding = TE_QP;
+ else
+ mi->transfer_encoding = TE_DONTCARE;
+}
+
+static int is_multipart_boundary(struct mailinfo *mi, const struct strbuf *line)
+{
+ struct strbuf *content_top = *(mi->content_top);
+
+ return ((content_top->len <= line->len) &&
+ !memcmp(line->buf, content_top->buf, content_top->len));
+}
+
+static void cleanup_subject(struct mailinfo *mi, struct strbuf *subject)
+{
+ size_t at = 0;
+
+ while (at < subject->len) {
+ char *pos;
+ size_t remove;
+
+ switch (subject->buf[at]) {
+ case 'r': case 'R':
+ if (subject->len <= at + 3)
+ break;
+ if ((subject->buf[at + 1] == 'e' ||
+ subject->buf[at + 1] == 'E') &&
+ subject->buf[at + 2] == ':') {
+ strbuf_remove(subject, at, 3);
+ continue;
+ }
+ at++;
+ break;
+ case ' ': case '\t': case ':':
+ strbuf_remove(subject, at, 1);
+ continue;
+ case '[':
+ pos = strchr(subject->buf + at, ']');
+ if (!pos)
+ break;
+ remove = pos - subject->buf + at + 1;
+ if (!mi->keep_non_patch_brackets_in_subject ||
+ (7 <= remove &&
+ memmem(subject->buf + at, remove, "PATCH", 5)))
+ strbuf_remove(subject, at, remove);
+ else {
+ at += remove;
+ /*
+ * If the input had a space after the ], keep
+ * it. We don't bother with finding the end of
+ * the space, since we later normalize it
+ * anyway.
+ */
+ if (isspace(subject->buf[at]))
+ at += 1;
+ }
+ continue;
+ }
+ break;
+ }
+ strbuf_trim(subject);
+}
+
+#define MAX_HDR_PARSED 10
+static const char *header[MAX_HDR_PARSED] = {
+ "From","Subject","Date",
+};
+
+static inline int cmp_header(const struct strbuf *line, const char *hdr)
+{
+ int len = strlen(hdr);
+ return !strncasecmp(line->buf, hdr, len) && line->len > len &&
+ line->buf[len] == ':' && isspace(line->buf[len + 1]);
+}
+
+static int is_format_patch_separator(const char *line, int len)
+{
+ static const char SAMPLE[] =
+ "From e6807f3efca28b30decfecb1732a56c7db1137ee Mon Sep 17 00:00:00 2001\n";
+ const char *cp;
+
+ if (len != strlen(SAMPLE))
+ return 0;
+ if (!skip_prefix(line, "From ", &cp))
+ return 0;
+ if (strspn(cp, "0123456789abcdef") != 40)
+ return 0;
+ cp += 40;
+ return !memcmp(SAMPLE + (cp - line), cp, strlen(SAMPLE) - (cp - line));
+}
+
+static struct strbuf *decode_q_segment(const struct strbuf *q_seg, int rfc2047)
+{
+ const char *in = q_seg->buf;
+ int c;
+ struct strbuf *out = xmalloc(sizeof(struct strbuf));
+ strbuf_init(out, q_seg->len);
+
+ while ((c = *in++) != 0) {
+ if (c == '=') {
+ int ch, d = *in;
+ if (d == '\n' || !d)
+ break; /* drop trailing newline */
+ ch = hex2chr(in);
+ if (ch >= 0) {
+ strbuf_addch(out, ch);
+ in += 2;
+ continue;
+ }
+ /* garbage -- fall through */
+ }
+ if (rfc2047 && c == '_') /* rfc2047 4.2 (2) */
+ c = 0x20;
+ strbuf_addch(out, c);
+ }
+ return out;
+}
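
For reference, the same quoted-printable rules in a self-contained sketch (illustration only, not the strbuf code above; hexval() and decode_q() are ad-hoc names): "=XX" yields the byte 0xXX, a bare "=" at end of input is a soft break that is dropped, and in RFC 2047 mode an underscore stands for a space.

	#include <stdio.h>

	static int hexval(int c)
	{
		if (c >= '0' && c <= '9') return c - '0';
		if (c >= 'a' && c <= 'f') return c - 'a' + 10;
		if (c >= 'A' && c <= 'F') return c - 'A' + 10;
		return -1;
	}

	static void decode_q(const char *in, int rfc2047)
	{
		int c;

		while ((c = (unsigned char)*in++) != 0) {
			if (c == '=') {
				int d = (unsigned char)in[0], hi, lo;
				if (d == '\n' || !d)
					break;          /* soft break / end of input */
				hi = hexval(d);
				lo = in[1] ? hexval((unsigned char)in[1]) : -1;
				if (hi >= 0 && lo >= 0) {
					putchar(hi * 16 + lo);
					in += 2;
					continue;
				}
				/* garbage: emit the '=' itself and keep going */
			}
			if (rfc2047 && c == '_')
				c = ' ';
			putchar(c);
		}
		putchar('\n');
	}

	int main(void)
	{
		decode_q("Foo=FCbar", 1);       /* Foo, byte 0xFC, bar */
		decode_q("Keld_J=F8rn", 1);     /* '_' becomes a space */
		return 0;
	}
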
+
+static struct strbuf *decode_b_segment(const struct strbuf *b_seg)
+{
+ /* Decode the base64 text in b_seg into a newly allocated strbuf. */
+ int c, pos = 0, acc = 0;
+ const char *in = b_seg->buf;
+ struct strbuf *out = xmalloc(sizeof(struct strbuf));
+ strbuf_init(out, b_seg->len);
+
+ while ((c = *in++) != 0) {
+ if (c == '+')
+ c = 62;
+ else if (c == '/')
+ c = 63;
+ else if ('A' <= c && c <= 'Z')
+ c -= 'A';
+ else if ('a' <= c && c <= 'z')
+ c -= 'a' - 26;
+ else if ('0' <= c && c <= '9')
+ c -= '0' - 52;
+ else
+ continue; /* garbage */
+ switch (pos++) {
+ case 0:
+ acc = (c << 2);
+ break;
+ case 1:
+ strbuf_addch(out, (acc | (c >> 4)));
+ acc = (c & 15) << 4;
+ break;
+ case 2:
+ strbuf_addch(out, (acc | (c >> 2)));
+ acc = (c & 3) << 6;
+ break;
+ case 3:
+ strbuf_addch(out, (acc | c));
+ acc = pos = 0;
+ break;
+ }
+ }
+ return out;
+}
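
The 2-, 4- and 6-bit accumulator above is easiest to see on the textbook example "TWFu" -> "Man". A standalone sketch of the same stepping, for illustration only (b64val() is an ad-hoc helper, not git API):

	#include <stdio.h>

	static int b64val(int c)
	{
		if (c == '+') return 62;
		if (c == '/') return 63;
		if (c >= 'A' && c <= 'Z') return c - 'A';
		if (c >= 'a' && c <= 'z') return c - 'a' + 26;
		if (c >= '0' && c <= '9') return c - '0' + 52;
		return -1;                      /* '=' padding and garbage */
	}

	int main(void)
	{
		const char *in = "TWFu";        /* T=19, W=22, F=5, u=46 */
		int pos = 0, acc = 0, c;

		while ((c = *in++) != 0) {
			int v = b64val(c);
			if (v < 0)
				continue;
			switch (pos++ & 3) {
			case 0: acc = v << 2; break;
			case 1: putchar(acc | (v >> 4)); acc = (v & 15) << 4; break; /* 'M' */
			case 2: putchar(acc | (v >> 2)); acc = (v & 3) << 6; break;  /* 'a' */
			case 3: putchar(acc | v); acc = 0; break;                    /* 'n' */
			}
		}
		putchar('\n');
		return 0;
	}
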
+
+static int convert_to_utf8(struct mailinfo *mi,
+ struct strbuf *line, const char *charset)
+{
+ char *out;
+
+ if (!mi->metainfo_charset || !charset || !*charset)
+ return 0;
+
+ if (same_encoding(mi->metainfo_charset, charset))
+ return 0;
+ out = reencode_string(line->buf, mi->metainfo_charset, charset);
+ if (!out) {
+ mi->input_error = -1;
+ return error("cannot convert from %s to %s",
+ charset, mi->metainfo_charset);
+ }
+ strbuf_attach(line, out, strlen(out), strlen(out));
+ return 0;
+}
+
+static void decode_header(struct mailinfo *mi, struct strbuf *it)
+{
+ char *in, *ep, *cp;
+ struct strbuf outbuf = STRBUF_INIT, *dec;
+ struct strbuf charset_q = STRBUF_INIT, piecebuf = STRBUF_INIT;
+ int found_error = 1; /* pessimism */
+
+ in = it->buf;
+ while (in - it->buf <= it->len && (ep = strstr(in, "=?")) != NULL) {
+ int encoding;
+ strbuf_reset(&charset_q);
+ strbuf_reset(&piecebuf);
+
+ if (in != ep) {
+ /*
+ * We are about to process an encoded-word
+ * that begins at ep, but there is something
+ * before the encoded word.
+ */
+ char *scan;
+ for (scan = in; scan < ep; scan++)
+ if (!isspace(*scan))
+ break;
+
+ if (scan != ep || in == it->buf) {
+ /*
+ * We should not lose that "something",
+ * unless we have just processed an
+ * encoded-word, and there is only LWS
+ * before the one we are about to process.
+ */
+ strbuf_add(&outbuf, in, ep - in);
+ }
+ }
+ /* E.g.
+ * ep : "=?iso-2022-jp?B?GyR...?= foo"
+ * ep : "=?ISO-8859-1?Q?Foo=FCbar?= baz"
+ */
+ ep += 2;
+
+ if (ep - it->buf >= it->len || !(cp = strchr(ep, '?')))
+ goto release_return;
+
+ if (cp + 3 - it->buf > it->len)
+ goto release_return;
+ strbuf_add(&charset_q, ep, cp - ep);
+
+ encoding = cp[1];
+ if (!encoding || cp[2] != '?')
+ goto release_return;
+ ep = strstr(cp + 3, "?=");
+ if (!ep)
+ goto release_return;
+ strbuf_add(&piecebuf, cp + 3, ep - cp - 3);
+ switch (tolower(encoding)) {
+ default:
+ goto release_return;
+ case 'b':
+ dec = decode_b_segment(&piecebuf);
+ break;
+ case 'q':
+ dec = decode_q_segment(&piecebuf, 1);
+ break;
+ }
+ if (convert_to_utf8(mi, dec, charset_q.buf))
+ goto release_return;
+
+ strbuf_addbuf(&outbuf, dec);
+ strbuf_release(dec);
+ free(dec);
+ in = ep + 2;
+ }
+ strbuf_addstr(&outbuf, in);
+ strbuf_reset(it);
+ strbuf_addbuf(it, &outbuf);
+ found_error = 0;
+release_return:
+ strbuf_release(&outbuf);
+ strbuf_release(&charset_q);
+ strbuf_release(&piecebuf);
+
+ if (found_error)
+ mi->input_error = -1;
+}
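
To make the loop above easier to follow, here is how one of the sample encoded-words from the comment splits apart (standalone sketch, illustration only): "=?" starts the word, the charset runs to the next '?', a single letter names the encoding, and the payload runs to the closing "?=".

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *word = "=?ISO-8859-1?Q?Foo=FCbar?= baz";
		const char *start, *cs, *q1, *end;

		start = strstr(word, "=?");
		if (!start)
			return 1;
		cs = start + 2;                 /* charset begins here */
		q1 = strchr(cs, '?');           /* end of charset */
		if (!q1 || !q1[1] || q1[2] != '?')
			return 1;
		end = strstr(q1 + 3, "?=");     /* end of payload */
		if (!end)
			return 1;

		printf("charset : %.*s\n", (int)(q1 - cs), cs);          /* ISO-8859-1 */
		printf("encoding: %c\n", q1[1]);                         /* Q */
		printf("payload : %.*s\n", (int)(end - q1 - 3), q1 + 3); /* Foo=FCbar */
		printf("rest    : %s\n", end + 2);                       /* " baz" */
		return 0;
	}

The payload is then B- or Q-decoded (see the sketches above) and, if a metainfo charset is configured, reencoded to it — UTF-8 in practice.
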
+
+static int check_header(struct mailinfo *mi,
+ const struct strbuf *line,
+ struct strbuf *hdr_data[], int overwrite)
+{
+ int i, ret = 0, len;
+ struct strbuf sb = STRBUF_INIT;
+
+ /* search for the interesting parts */
+ for (i = 0; header[i]; i++) {
+ int len = strlen(header[i]);
+ if ((!hdr_data[i] || overwrite) && cmp_header(line, header[i])) {
+ /* Unwrap inline B and Q encoding, and optionally
+ * normalize the meta information to utf8.
+ */
+ strbuf_add(&sb, line->buf + len + 2, line->len - len - 2);
+ decode_header(mi, &sb);
+ handle_header(&hdr_data[i], &sb);
+ ret = 1;
+ goto check_header_out;
+ }
+ }
+
+ /* Content stuff */
+ if (cmp_header(line, "Content-Type")) {
+ len = strlen("Content-Type: ");
+ strbuf_add(&sb, line->buf + len, line->len - len);
+ decode_header(mi, &sb);
+ strbuf_insert(&sb, 0, "Content-Type: ", len);
+ handle_content_type(mi, &sb);
+ ret = 1;
+ goto check_header_out;
+ }
+ if (cmp_header(line, "Content-Transfer-Encoding")) {
+ len = strlen("Content-Transfer-Encoding: ");
+ strbuf_add(&sb, line->buf + len, line->len - len);
+ decode_header(mi, &sb);
+ handle_content_transfer_encoding(mi, &sb);
+ ret = 1;
+ goto check_header_out;
+ }
+ if (cmp_header(line, "Message-Id")) {
+ len = strlen("Message-Id: ");
+ strbuf_add(&sb, line->buf + len, line->len - len);
+ decode_header(mi, &sb);
+ if (mi->add_message_id)
+ mi->message_id = strbuf_detach(&sb, NULL);
+ ret = 1;
+ goto check_header_out;
+ }
+
+check_header_out:
+ strbuf_release(&sb);
+ return ret;
+}
+
+/*
+ * Returns 1 if the given line or any line beginning with the given line is an
+ * in-body header (that is, check_header will succeed when passed
+ * mi->s_hdr_data).
+ */
+static int is_inbody_header(const struct mailinfo *mi,
+ const struct strbuf *line)
+{
+ int i;
+ for (i = 0; header[i]; i++)
+ if (!mi->s_hdr_data[i] && cmp_header(line, header[i]))
+ return 1;
+ return 0;
+}
+
+static void decode_transfer_encoding(struct mailinfo *mi, struct strbuf *line)
+{
+ struct strbuf *ret;
+
+ switch (mi->transfer_encoding) {
+ case TE_QP:
+ ret = decode_q_segment(line, 0);
+ break;
+ case TE_BASE64:
+ ret = decode_b_segment(line);
+ break;
+ case TE_DONTCARE:
+ default:
+ return;
+ }
+ strbuf_reset(line);
+ strbuf_addbuf(line, ret);
+ strbuf_release(ret);
+ free(ret);
+}
+
+static inline int patchbreak(const struct strbuf *line)
+{
+ size_t i;
+
+ /* Beginning of a "diff -" header? */
+ if (starts_with(line->buf, "diff -"))
+ return 1;
+
+ /* CVS "Index: " line? */
+ if (starts_with(line->buf, "Index: "))
+ return 1;
+
+ /*
+ * "--- <filename>" starts patches without headers
+ * "---<sp>*" is a manual separator
+ */
+ if (line->len < 4)
+ return 0;
+
+ if (starts_with(line->buf, "---")) {
+ /* space followed by a filename? */
+ if (line->buf[3] == ' ' && !isspace(line->buf[4]))
+ return 1;
+ /* Just whitespace? */
+ for (i = 3; i < line->len; i++) {
+ unsigned char c = line->buf[i];
+ if (c == '\n')
+ return 1;
+ if (!isspace(c))
+ break;
+ }
+ return 0;
+ }
+ return 0;
+}
+
+static int is_scissors_line(const char *line)
+{
+ const char *c;
+ int scissors = 0, gap = 0;
+ const char *first_nonblank = NULL, *last_nonblank = NULL;
+ int visible, perforation = 0, in_perforation = 0;
+
+ for (c = line; *c; c++) {
+ if (isspace(*c)) {
+ if (in_perforation) {
+ perforation++;
+ gap++;
+ }
+ continue;
+ }
+ last_nonblank = c;
+ if (first_nonblank == NULL)
+ first_nonblank = c;
+ if (*c == '-') {
+ in_perforation = 1;
+ perforation++;
+ continue;
+ }
+ if ((!memcmp(c, ">8", 2) || !memcmp(c, "8<", 2) ||
+ !memcmp(c, ">%", 2) || !memcmp(c, "%<", 2))) {
+ in_perforation = 1;
+ perforation += 2;
+ scissors += 2;
+ c++;
+ continue;
+ }
+ in_perforation = 0;
+ }
+
+ /*
+ * The mark must be at least 8 bytes long (e.g. "-- >8 --").
+ * Even though there can be arbitrary cruft on the same line
+ * (e.g. "cut here"), in order to avoid misidentification, the
+ * perforation must occupy more than a third of the visible
+ * width of the line, and dashes and scissors must occupy more
+ * than half of the perforation.
+ */
+
+ if (first_nonblank && last_nonblank)
+ visible = last_nonblank - first_nonblank + 1;
+ else
+ visible = 0;
+ return (scissors && 8 <= visible &&
+ visible < perforation * 3 &&
+ gap * 2 < perforation);
+}
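
To make the thresholds in the comment above concrete, here is the arithmetic worked out by hand for the canonical separator (counts follow the loop exactly; this is an illustration, not extra test data):

	line        = "-- >8 --"
	visible     = 8   (first to last non-blank column, inclusive)
	perforation = 8   (four dashes, +2 for ">8", +2 blanks inside the run)
	scissors    = 2   (the ">8" pair)
	gap         = 2   (the blanks inside the run)

	checks: scissors != 0,  8 <= visible (8),
	        visible (8) < 3 * perforation (24),
	        2 * gap (4) < perforation (8)        -> scissors line
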
+
+static void flush_inbody_header_accum(struct mailinfo *mi)
+{
+ if (!mi->inbody_header_accum.len)
+ return;
+ if (!check_header(mi, &mi->inbody_header_accum, mi->s_hdr_data, 0))
+ BUG("inbody_header_accum, if not empty, must always contain a valid in-body header");
+ strbuf_reset(&mi->inbody_header_accum);
+}
+
+static int check_inbody_header(struct mailinfo *mi, const struct strbuf *line)
+{
+ if (mi->inbody_header_accum.len &&
+ (line->buf[0] == ' ' || line->buf[0] == '\t')) {
+ if (mi->use_scissors && is_scissors_line(line->buf)) {
+ /*
+ * This is a scissors line; do not consider this line
+ * as a header continuation line.
+ */
+ flush_inbody_header_accum(mi);
+ return 0;
+ }
+ strbuf_strip_suffix(&mi->inbody_header_accum, "\n");
+ strbuf_addbuf(&mi->inbody_header_accum, line);
+ return 1;
+ }
+
+ flush_inbody_header_accum(mi);
+
+ if (starts_with(line->buf, ">From") && isspace(line->buf[5]))
+ return is_format_patch_separator(line->buf + 1, line->len - 1);
+ if (starts_with(line->buf, "[PATCH]") && isspace(line->buf[7])) {
+ int i;
+ for (i = 0; header[i]; i++)
+ if (!strcmp("Subject", header[i])) {
+ handle_header(&mi->s_hdr_data[i], line);
+ return 1;
+ }
+ return 0;
+ }
+ if (is_inbody_header(mi, line)) {
+ strbuf_addbuf(&mi->inbody_header_accum, line);
+ return 1;
+ }
+ return 0;
+}
+
+static int handle_commit_msg(struct mailinfo *mi, struct strbuf *line)
+{
+ assert(!mi->filter_stage);
+
+ if (mi->header_stage) {
+ if (!line->len || (line->len == 1 && line->buf[0] == '\n')) {
+ if (mi->inbody_header_accum.len) {
+ flush_inbody_header_accum(mi);
+ mi->header_stage = 0;
+ }
+ return 0;
+ }
+ }
+
+ if (mi->use_inbody_headers && mi->header_stage) {
+ mi->header_stage = check_inbody_header(mi, line);
+ if (mi->header_stage)
+ return 0;
+ } else
+ /* Only trim the first (blank) line of the commit message
+ * when ignoring in-body headers.
+ */
+ mi->header_stage = 0;
+
+ /* normalize the log message to UTF-8. */
+ if (convert_to_utf8(mi, line, mi->charset.buf))
+ return 0; /* mi->input_error already set */
+
+ if (mi->use_scissors && is_scissors_line(line->buf)) {
+ int i;
+
+ strbuf_setlen(&mi->log_message, 0);
+ mi->header_stage = 1;
+
+ /*
+ * We may have already read "secondary headers"; purge
+ * them to give ourselves a clean restart.
+ */
+ for (i = 0; header[i]; i++) {
+ if (mi->s_hdr_data[i])
+ strbuf_release(mi->s_hdr_data[i]);
+ mi->s_hdr_data[i] = NULL;
+ }
+ return 0;
+ }
+
+ if (patchbreak(line)) {
+ if (mi->message_id)
+ strbuf_addf(&mi->log_message,
+ "Message-Id: %s\n", mi->message_id);
+ return 1;
+ }
+
+ strbuf_addbuf(&mi->log_message, line);
+ return 0;
+}
+
+static void handle_patch(struct mailinfo *mi, const struct strbuf *line)
+{
+ fwrite(line->buf, 1, line->len, mi->patchfile);
+ mi->patch_lines++;
+}
+
+static void handle_filter(struct mailinfo *mi, struct strbuf *line)
+{
+ switch (mi->filter_stage) {
+ case 0:
+ if (!handle_commit_msg(mi, line))
+ break;
+ mi->filter_stage++;
+ /* fallthrough */
+ case 1:
+ handle_patch(mi, line);
+ break;
+ }
+}
+
+static int is_rfc2822_header(const struct strbuf *line)
+{
+ /*
+ * The section that defines the loosest possible
+ * field name is "3.6.8 Optional fields".
+ *
+ * optional-field = field-name ":" unstructured CRLF
+ * field-name = 1*ftext
+ * ftext = %d33-57 / %59-126
+ */
+ int ch;
+ char *cp = line->buf;
+
+ /* Count mbox From headers as headers */
+ if (starts_with(cp, "From ") || starts_with(cp, ">From "))
+ return 1;
+
+ while ((ch = *cp++)) {
+ if (ch == ':')
+ return 1;
+ if ((33 <= ch && ch <= 57) ||
+ (59 <= ch && ch <= 126))
+ continue;
+ break;
+ }
+ return 0;
+}
+
+static int read_one_header_line(struct strbuf *line, FILE *in)
+{
+ struct strbuf continuation = STRBUF_INIT;
+
+ /* Get the first part of the line. */
+ if (strbuf_getline_lf(line, in))
+ return 0;
+
+ /*
+ * Is it an empty line or not a valid rfc2822 header?
+ * If so, stop here, and return false ("not a header")
+ */
+ strbuf_rtrim(line);
+ if (!line->len || !is_rfc2822_header(line)) {
+ /* Re-add the newline */
+ strbuf_addch(line, '\n');
+ return 0;
+ }
+
+ /*
+ * Now we need to eat all the continuation lines..
+ * Yuck, 2822 header "folding"
+ */
+ for (;;) {
+ int peek;
+
+ peek = fgetc(in);
+ if (peek == EOF)
+ break;
+ ungetc(peek, in);
+ if (peek != ' ' && peek != '\t')
+ break;
+ if (strbuf_getline_lf(&continuation, in))
+ break;
+ continuation.buf[0] = ' ';
+ strbuf_rtrim(&continuation);
+ strbuf_addbuf(line, &continuation);
+ }
+ strbuf_release(&continuation);
+
+ return 1;
+}
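
The folding dance above — peek one byte, push it back, append the line if it begins with a space or tab — is standard RFC 2822 unfolding; the folded User-Agent header in the t/t4256/1/patch fixture further down is a real instance. A self-contained sketch with fmemopen() (POSIX.1-2008) standing in for the mailbox stream; the header text is made up, for illustration only:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *mbox =
			"Subject: mailinfo: support\n"
			" format=flowed\n"
			"\n";
		FILE *in = fmemopen((void *)mbox, strlen(mbox), "r");
		char line[256], cont[256];

		if (!in || !fgets(line, sizeof(line), in))
			return 1;
		line[strcspn(line, "\n")] = '\0';

		for (;;) {
			int peek = fgetc(in);
			if (peek == EOF)
				break;
			ungetc(peek, in);
			if (peek != ' ' && peek != '\t')
				break;          /* next line starts a new header */
			if (!fgets(cont, sizeof(cont), in))
				break;
			cont[strcspn(cont, "\n")] = '\0';
			strncat(line, cont, sizeof(line) - strlen(line) - 1);
		}
		printf("%s\n", line);   /* Subject: mailinfo: support format=flowed */
		fclose(in);
		return 0;
	}
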
+
+static int find_boundary(struct mailinfo *mi, struct strbuf *line)
+{
+ while (!strbuf_getline_lf(line, mi->input)) {
+ if (*(mi->content_top) && is_multipart_boundary(mi, line))
+ return 1;
+ }
+ return 0;
+}
+
+static int handle_boundary(struct mailinfo *mi, struct strbuf *line)
+{
+ struct strbuf newline = STRBUF_INIT;
+
+ strbuf_addch(&newline, '\n');
+again:
+ if (line->len >= (*(mi->content_top))->len + 2 &&
+ !memcmp(line->buf + (*(mi->content_top))->len, "--", 2)) {
+ /* we hit an end boundary */
+ /* pop the current boundary off the stack */
+ strbuf_release(*(mi->content_top));
+ FREE_AND_NULL(*(mi->content_top));
+
+ /* technically won't happen as is_multipart_boundary()
+ will fail first. But just in case..
+ */
+ if (--mi->content_top < mi->content) {
+ error("Detected mismatched boundaries, can't recover");
+ mi->input_error = -1;
+ mi->content_top = mi->content;
+ strbuf_release(&newline);
+ return 0;
+ }
+ handle_filter(mi, &newline);
+ strbuf_release(&newline);
+ if (mi->input_error)
+ return 0;
+
+ /* skip to the next boundary */
+ if (!find_boundary(mi, line))
+ return 0;
+ goto again;
+ }
+
+ /* set some defaults */
+ mi->transfer_encoding = TE_DONTCARE;
+ strbuf_reset(&mi->charset);
+
+ /* slurp in this section's info */
+ while (read_one_header_line(line, mi->input))
+ check_header(mi, line, mi->p_hdr_data, 0);
+
+ strbuf_release(&newline);
+ /* replenish line */
+ if (strbuf_getline_lf(line, mi->input))
+ return 0;
+ strbuf_addch(line, '\n');
+ return 1;
+}
+
+static void handle_filter_flowed(struct mailinfo *mi, struct strbuf *line,
+ struct strbuf *prev)
+{
+ size_t len = line->len;
+ const char *rest;
+
+ if (!mi->format_flowed) {
+ handle_filter(mi, line);
+ return;
+ }
+
+ if (line->buf[len - 1] == '\n') {
+ len--;
+ if (len && line->buf[len - 1] == '\r')
+ len--;
+ }
+
+ /* Keep signature separator as-is. */
+ if (skip_prefix(line->buf, "-- ", &rest) && rest - line->buf == len) {
+ if (prev->len) {
+ handle_filter(mi, prev);
+ strbuf_reset(prev);
+ }
+ handle_filter(mi, line);
+ return;
+ }
+
+ /* Unstuff space-stuffed line. */
+ if (len && line->buf[0] == ' ') {
+ strbuf_remove(line, 0, 1);
+ len--;
+ }
+
+ /* Save flowed line for later, but without the soft line break. */
+ if (len && line->buf[len - 1] == ' ') {
+ strbuf_add(prev, line->buf, len - !!mi->delsp);
+ return;
+ }
+
+ /* Prepend any previous partial lines */
+ strbuf_insert(line, 0, prev->buf, prev->len);
+ strbuf_reset(prev);
+
+ handle_filter(mi, line);
+}
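
handle_filter_flowed() is the new piece this patch adds: under format=flowed a line ending in a space is "flowed" into the next one, a leading space is unstuffed, the "-- " signature separator passes through untouched, and with DelSp=yes the joining space itself is dropped. A rough standalone sketch on already-chomped lines (reflow() and the sample message are made up; the real code additionally strips CRLF and feeds the result through handle_filter()):

	#include <stdio.h>
	#include <string.h>

	static void reflow(const char *lines[], int n, int delsp)
	{
		char para[1024] = "";

		for (int i = 0; i < n; i++) {
			const char *line = lines[i];
			size_t len = strlen(line);

			if (len && line[0] == ' ') {    /* space-stuffed */
				line++;
				len--;
			}
			if (!strcmp(line, "-- ")) {     /* signature separator */
				if (para[0]) {
					printf("%s\n", para);
					para[0] = '\0';
				}
				printf("%s\n", line);
				continue;
			}
			if (len && line[len - 1] == ' ') {
				/* soft break: join, minus the space if DelSp=yes */
				strncat(para, line, len - (delsp ? 1 : 0));
				continue;
			}
			printf("%s%s\n", para, line);   /* hard break: flush */
			para[0] = '\0';
		}
	}

	int main(void)
	{
		const char *msg[] = {
			"mailinfo: support ",   /* trailing space = soft break */
			"format=flowed",
			"-- ",
			"A U Thor",
		};
		reflow(msg, 4, 0);
		/* prints "mailinfo: support format=flowed", then "-- ", then "A U Thor" */
		return 0;
	}
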
+
+static void handle_body(struct mailinfo *mi, struct strbuf *line)
+{
+ struct strbuf prev = STRBUF_INIT;
+
+ /* Skip up to the first boundary */
+ if (*(mi->content_top)) {
+ if (!find_boundary(mi, line))
+ goto handle_body_out;
+ }
+
+ do {
+ /* process any boundary lines */
+ if (*(mi->content_top) && is_multipart_boundary(mi, line)) {
+ /* flush any leftover */
+ if (prev.len) {
+ handle_filter(mi, &prev);
+ strbuf_reset(&prev);
+ }
+ if (!handle_boundary(mi, line))
+ goto handle_body_out;
+ }
+
+ /* Unwrap transfer encoding */
+ decode_transfer_encoding(mi, line);
+
+ switch (mi->transfer_encoding) {
+ case TE_BASE64:
+ case TE_QP:
+ {
+ struct strbuf **lines, **it, *sb;
+
+ /* Prepend any previous partial lines */
+ strbuf_insert(line, 0, prev.buf, prev.len);
+ strbuf_reset(&prev);
+
+ /*
+ * This is a decoded line that may contain
+ * multiple new lines. Pass only one chunk
+ * at a time to handle_filter()
+ */
+ lines = strbuf_split(line, '\n');
+ for (it = lines; (sb = *it); it++) {
+ if (*(it + 1) == NULL) /* The last line */
+ if (sb->buf[sb->len - 1] != '\n') {
+ /* Partial line, save it for later. */
+ strbuf_addbuf(&prev, sb);
+ break;
+ }
+ handle_filter_flowed(mi, sb, &prev);
+ }
+ /*
+ * The partial chunk is saved in "prev" and will be
+ * prepended before the next line read by strbuf_getwholeline()
+ * is processed.
+ */
+ strbuf_list_free(lines);
+ break;
+ }
+ default:
+ handle_filter_flowed(mi, line, &prev);
+ }
+
+ if (mi->input_error)
+ break;
+ } while (!strbuf_getwholeline(line, mi->input, '\n'));
+
+ if (prev.len)
+ handle_filter(mi, &prev);
+
+ flush_inbody_header_accum(mi);
+
+handle_body_out:
+ strbuf_release(&prev);
+}
+
+static void output_header_lines(FILE *fout, const char *hdr, const struct strbuf *data)
+{
+ const char *sp = data->buf;
+ while (1) {
+ char *ep = strchr(sp, '\n');
+ int len;
+ if (!ep)
+ len = strlen(sp);
+ else
+ len = ep - sp;
+ fprintf(fout, "%s: %.*s\n", hdr, len, sp);
+ if (!ep)
+ break;
+ sp = ep + 1;
+ }
+}
+
+static void handle_info(struct mailinfo *mi)
+{
+ struct strbuf *hdr;
+ int i;
+
+ for (i = 0; header[i]; i++) {
+ /* only print inbody headers if we output a patch file */
+ if (mi->patch_lines && mi->s_hdr_data[i])
+ hdr = mi->s_hdr_data[i];
+ else if (mi->p_hdr_data[i])
+ hdr = mi->p_hdr_data[i];
+ else
+ continue;
+
+ if (!strcmp(header[i], "Subject")) {
+ if (!mi->keep_subject) {
+ cleanup_subject(mi, hdr);
+ cleanup_space(hdr);
+ }
+ output_header_lines(mi->output, "Subject", hdr);
+ } else if (!strcmp(header[i], "From")) {
+ cleanup_space(hdr);
+ handle_from(mi, hdr);
+ fprintf(mi->output, "Author: %s\n", mi->name.buf);
+ fprintf(mi->output, "Email: %s\n", mi->email.buf);
+ } else {
+ cleanup_space(hdr);
+ fprintf(mi->output, "%s: %s\n", header[i], hdr->buf);
+ }
+ }
+ fprintf(mi->output, "\n");
+}
+
+int mailinfo(struct mailinfo *mi, const char *msg, const char *patch)
+{
+ FILE *cmitmsg;
+ int peek;
+ struct strbuf line = STRBUF_INIT;
+
+ cmitmsg = fopen(msg, "w");
+ if (!cmitmsg) {
+ perror(msg);
+ return -1;
+ }
+ mi->patchfile = fopen(patch, "w");
+ if (!mi->patchfile) {
+ perror(patch);
+ fclose(cmitmsg);
+ return -1;
+ }
+
+ mi->p_hdr_data = xcalloc(MAX_HDR_PARSED, sizeof(*(mi->p_hdr_data)));
+ mi->s_hdr_data = xcalloc(MAX_HDR_PARSED, sizeof(*(mi->s_hdr_data)));
+
+ do {
+ peek = fgetc(mi->input);
+ if (peek == EOF) {
+ fclose(cmitmsg);
+ return error("empty patch: '%s'", patch);
+ }
+ } while (isspace(peek));
+ ungetc(peek, mi->input);
+
+ /* process the email header */
+ while (read_one_header_line(&line, mi->input))
+ check_header(mi, &line, mi->p_hdr_data, 1);
+
+ handle_body(mi, &line);
+ fwrite(mi->log_message.buf, 1, mi->log_message.len, cmitmsg);
+ fclose(cmitmsg);
+ fclose(mi->patchfile);
+
+ handle_info(mi);
+ strbuf_release(&line);
+ return mi->input_error;
+}
+
+static int git_mailinfo_config(const char *var, const char *value, void *mi_)
+{
+ struct mailinfo *mi = mi_;
+
+ if (!starts_with(var, "mailinfo."))
+ return git_default_config(var, value, NULL);
+ if (!strcmp(var, "mailinfo.scissors")) {
+ mi->use_scissors = git_config_bool(var, value);
+ return 0;
+ }
+ /* perhaps others here */
+ return 0;
+}
+
+void setup_mailinfo(struct mailinfo *mi)
+{
+ memset(mi, 0, sizeof(*mi));
+ strbuf_init(&mi->name, 0);
+ strbuf_init(&mi->email, 0);
+ strbuf_init(&mi->charset, 0);
+ strbuf_init(&mi->log_message, 0);
+ strbuf_init(&mi->inbody_header_accum, 0);
+ mi->header_stage = 1;
+ mi->use_inbody_headers = 1;
+ mi->content_top = mi->content;
+ git_config(git_mailinfo_config, mi);
+}
+
+void clear_mailinfo(struct mailinfo *mi)
+{
+ int i;
+
+ strbuf_release(&mi->name);
+ strbuf_release(&mi->email);
+ strbuf_release(&mi->charset);
+ strbuf_release(&mi->inbody_header_accum);
+ free(mi->message_id);
+
+ if (mi->p_hdr_data)
+ for (i = 0; mi->p_hdr_data[i]; i++)
+ strbuf_release(mi->p_hdr_data[i]);
+ free(mi->p_hdr_data);
+ if (mi->s_hdr_data)
+ for (i = 0; mi->s_hdr_data[i]; i++)
+ strbuf_release(mi->s_hdr_data[i]);
+ free(mi->s_hdr_data);
+
+ while (mi->content < mi->content_top) {
+ free(*(mi->content_top));
+ mi->content_top--;
+ }
+
+ strbuf_release(&mi->log_message);
+}
diff --git a/t/t4256/1/mailinfo.c.orig b/t/t4256/1/mailinfo.c.orig
new file mode 100644
index 0000000000..3281a37d51
--- /dev/null
+++ b/t/t4256/1/mailinfo.c.orig
@@ -0,0 +1,1185 @@
+#include "cache.h"
+#include "config.h"
+#include "utf8.h"
+#include "strbuf.h"
+#include "mailinfo.h"
+
+static void cleanup_space(struct strbuf *sb)
+{
+ size_t pos, cnt;
+ for (pos = 0; pos < sb->len; pos++) {
+ if (isspace(sb->buf[pos])) {
+ sb->buf[pos] = ' ';
+ for (cnt = 0; isspace(sb->buf[pos + cnt + 1]); cnt++);
+ strbuf_remove(sb, pos + 1, cnt);
+ }
+ }
+}
+
+static void get_sane_name(struct strbuf *out, struct strbuf *name, struct strbuf *email)
+{
+ struct strbuf *src = name;
+ if (name->len < 3 || 60 < name->len || strchr(name->buf, '@') ||
+ strchr(name->buf, '<') || strchr(name->buf, '>'))
+ src = email;
+ else if (name == out)
+ return;
+ strbuf_reset(out);
+ strbuf_addbuf(out, src);
+}
+
+static void parse_bogus_from(struct mailinfo *mi, const struct strbuf *line)
+{
+ /* John Doe <johndoe> */
+
+ char *bra, *ket;
+ /* This is fallback, so do not bother if we already have an
+ * e-mail address.
+ */
+ if (mi->email.len)
+ return;
+
+ bra = strchr(line->buf, '<');
+ if (!bra)
+ return;
+ ket = strchr(bra, '>');
+ if (!ket)
+ return;
+
+ strbuf_reset(&mi->email);
+ strbuf_add(&mi->email, bra + 1, ket - bra - 1);
+
+ strbuf_reset(&mi->name);
+ strbuf_add(&mi->name, line->buf, bra - line->buf);
+ strbuf_trim(&mi->name);
+ get_sane_name(&mi->name, &mi->name, &mi->email);
+}
+
+static const char *unquote_comment(struct strbuf *outbuf, const char *in)
+{
+ int c;
+ int take_next_literally = 0;
+
+ strbuf_addch(outbuf, '(');
+
+ while ((c = *in++) != 0) {
+ if (take_next_literally == 1) {
+ take_next_literally = 0;
+ } else {
+ switch (c) {
+ case '\\':
+ take_next_literally = 1;
+ continue;
+ case '(':
+ in = unquote_comment(outbuf, in);
+ continue;
+ case ')':
+ strbuf_addch(outbuf, ')');
+ return in;
+ }
+ }
+
+ strbuf_addch(outbuf, c);
+ }
+
+ return in;
+}
+
+static const char *unquote_quoted_string(struct strbuf *outbuf, const char *in)
+{
+ int c;
+ int take_next_literally = 0;
+
+ while ((c = *in++) != 0) {
+ if (take_next_literally == 1) {
+ take_next_literally = 0;
+ } else {
+ switch (c) {
+ case '\\':
+ take_next_literally = 1;
+ continue;
+ case '"':
+ return in;
+ }
+ }
+
+ strbuf_addch(outbuf, c);
+ }
+
+ return in;
+}
+
+static void unquote_quoted_pair(struct strbuf *line)
+{
+ struct strbuf outbuf;
+ const char *in = line->buf;
+ int c;
+
+ strbuf_init(&outbuf, line->len);
+
+ while ((c = *in++) != 0) {
+ switch (c) {
+ case '"':
+ in = unquote_quoted_string(&outbuf, in);
+ continue;
+ case '(':
+ in = unquote_comment(&outbuf, in);
+ continue;
+ }
+
+ strbuf_addch(&outbuf, c);
+ }
+
+ strbuf_swap(&outbuf, line);
+ strbuf_release(&outbuf);
+
+}
+
+static void handle_from(struct mailinfo *mi, const struct strbuf *from)
+{
+ char *at;
+ size_t el;
+ struct strbuf f;
+
+ strbuf_init(&f, from->len);
+ strbuf_addbuf(&f, from);
+
+ unquote_quoted_pair(&f);
+
+ at = strchr(f.buf, '@');
+ if (!at) {
+ parse_bogus_from(mi, from);
+ goto out;
+ }
+
+ /*
+ * If we already have one email, don't take any confusing lines
+ */
+ if (mi->email.len && strchr(at + 1, '@'))
+ goto out;
+
+ /* Pick up the string around '@', possibly delimited with <>
+ * pair; that is the email part.
+ */
+ while (at > f.buf) {
+ char c = at[-1];
+ if (isspace(c))
+ break;
+ if (c == '<') {
+ at[-1] = ' ';
+ break;
+ }
+ at--;
+ }
+ el = strcspn(at, " \n\t\r\v\f>");
+ strbuf_reset(&mi->email);
+ strbuf_add(&mi->email, at, el);
+ strbuf_remove(&f, at - f.buf, el + (at[el] ? 1 : 0));
+
+ /* The remainder is name. It could be
+ *
+ * - "John Doe <john.doe@xz>" (a), or
+ * - "john.doe@xz (John Doe)" (b), or
+ * - "John (zzz) Doe <john.doe@xz> (Comment)" (c)
+ *
+ * but we have removed the email part, so
+ *
+ * - remove extra spaces which could stay after email (case 'c'), and
+ * - trim from both ends, possibly removing the () pair at the end
+ * (cases 'a' and 'b').
+ */
+ cleanup_space(&f);
+ strbuf_trim(&f);
+ if (f.buf[0] == '(' && f.len && f.buf[f.len - 1] == ')') {
+ strbuf_remove(&f, 0, 1);
+ strbuf_setlen(&f, f.len - 1);
+ }
+
+ get_sane_name(&mi->name, &f, &mi->email);
+out:
+ strbuf_release(&f);
+}
+
+static void handle_header(struct strbuf **out, const struct strbuf *line)
+{
+ if (!*out) {
+ *out = xmalloc(sizeof(struct strbuf));
+ strbuf_init(*out, line->len);
+ } else
+ strbuf_reset(*out);
+
+ strbuf_addbuf(*out, line);
+}
+
+/* NOTE NOTE NOTE. We do not claim we do full MIME. We just attempt
+ * to have enough heuristics to grok MIME encoded patches often found
+ * on our mailing lists. For example, we do not even treat header lines
+ * case insensitively.
+ */
+
+static int slurp_attr(const char *line, const char *name, struct strbuf *attr)
+{
+ const char *ends, *ap = strcasestr(line, name);
+ size_t sz;
+
+ strbuf_setlen(attr, 0);
+ if (!ap)
+ return 0;
+ ap += strlen(name);
+ if (*ap == '"') {
+ ap++;
+ ends = "\"";
+ }
+ else
+ ends = "; \t";
+ sz = strcspn(ap, ends);
+ strbuf_add(attr, ap, sz);
+ return 1;
+}
+
+static void handle_content_type(struct mailinfo *mi, struct strbuf *line)
+{
+ struct strbuf *boundary = xmalloc(sizeof(struct strbuf));
+ strbuf_init(boundary, line->len);
+
+ if (slurp_attr(line->buf, "boundary=", boundary)) {
+ strbuf_insert(boundary, 0, "--", 2);
+ if (++mi->content_top >= &mi->content[MAX_BOUNDARIES]) {
+ error("Too many boundaries to handle");
+ mi->input_error = -1;
+ mi->content_top = &mi->content[MAX_BOUNDARIES] - 1;
+ return;
+ }
+ *(mi->content_top) = boundary;
+ boundary = NULL;
+ }
+ slurp_attr(line->buf, "charset=", &mi->charset);
+
+ if (boundary) {
+ strbuf_release(boundary);
+ free(boundary);
+ }
+}
+
+static void handle_content_transfer_encoding(struct mailinfo *mi,
+ const struct strbuf *line)
+{
+ if (strcasestr(line->buf, "base64"))
+ mi->transfer_encoding = TE_BASE64;
+ else if (strcasestr(line->buf, "quoted-printable"))
+ mi->transfer_encoding = TE_QP;
+ else
+ mi->transfer_encoding = TE_DONTCARE;
+}
+
+static int is_multipart_boundary(struct mailinfo *mi, const struct strbuf *line)
+{
+ struct strbuf *content_top = *(mi->content_top);
+
+ return ((content_top->len <= line->len) &&
+ !memcmp(line->buf, content_top->buf, content_top->len));
+}
+
+static void cleanup_subject(struct mailinfo *mi, struct strbuf *subject)
+{
+ size_t at = 0;
+
+ while (at < subject->len) {
+ char *pos;
+ size_t remove;
+
+ switch (subject->buf[at]) {
+ case 'r': case 'R':
+ if (subject->len <= at + 3)
+ break;
+ if ((subject->buf[at + 1] == 'e' ||
+ subject->buf[at + 1] == 'E') &&
+ subject->buf[at + 2] == ':') {
+ strbuf_remove(subject, at, 3);
+ continue;
+ }
+ at++;
+ break;
+ case ' ': case '\t': case ':':
+ strbuf_remove(subject, at, 1);
+ continue;
+ case '[':
+ pos = strchr(subject->buf + at, ']');
+ if (!pos)
+ break;
+ remove = pos - subject->buf + at + 1;
+ if (!mi->keep_non_patch_brackets_in_subject ||
+ (7 <= remove &&
+ memmem(subject->buf + at, remove, "PATCH", 5)))
+ strbuf_remove(subject, at, remove);
+ else {
+ at += remove;
+ /*
+ * If the input had a space after the ], keep
+ * it. We don't bother with finding the end of
+ * the space, since we later normalize it
+ * anyway.
+ */
+ if (isspace(subject->buf[at]))
+ at += 1;
+ }
+ continue;
+ }
+ break;
+ }
+ strbuf_trim(subject);
+}
+
+#define MAX_HDR_PARSED 10
+static const char *header[MAX_HDR_PARSED] = {
+ "From","Subject","Date",
+};
+
+static inline int cmp_header(const struct strbuf *line, const char *hdr)
+{
+ int len = strlen(hdr);
+ return !strncasecmp(line->buf, hdr, len) && line->len > len &&
+ line->buf[len] == ':' && isspace(line->buf[len + 1]);
+}
+
+static int is_format_patch_separator(const char *line, int len)
+{
+ static const char SAMPLE[] =
+ "From e6807f3efca28b30decfecb1732a56c7db1137ee Mon Sep 17 00:00:00 2001\n";
+ const char *cp;
+
+ if (len != strlen(SAMPLE))
+ return 0;
+ if (!skip_prefix(line, "From ", &cp))
+ return 0;
+ if (strspn(cp, "0123456789abcdef") != 40)
+ return 0;
+ cp += 40;
+ return !memcmp(SAMPLE + (cp - line), cp, strlen(SAMPLE) - (cp - line));
+}
+
+static struct strbuf *decode_q_segment(const struct strbuf *q_seg, int rfc2047)
+{
+ const char *in = q_seg->buf;
+ int c;
+ struct strbuf *out = xmalloc(sizeof(struct strbuf));
+ strbuf_init(out, q_seg->len);
+
+ while ((c = *in++) != 0) {
+ if (c == '=') {
+ int ch, d = *in;
+ if (d == '\n' || !d)
+ break; /* drop trailing newline */
+ ch = hex2chr(in);
+ if (ch >= 0) {
+ strbuf_addch(out, ch);
+ in += 2;
+ continue;
+ }
+ /* garbage -- fall through */
+ }
+ if (rfc2047 && c == '_') /* rfc2047 4.2 (2) */
+ c = 0x20;
+ strbuf_addch(out, c);
+ }
+ return out;
+}
+
+static struct strbuf *decode_b_segment(const struct strbuf *b_seg)
+{
+ /* Decode in..ep, possibly in-place to ot */
+ int c, pos = 0, acc = 0;
+ const char *in = b_seg->buf;
+ struct strbuf *out = xmalloc(sizeof(struct strbuf));
+ strbuf_init(out, b_seg->len);
+
+ while ((c = *in++) != 0) {
+ if (c == '+')
+ c = 62;
+ else if (c == '/')
+ c = 63;
+ else if ('A' <= c && c <= 'Z')
+ c -= 'A';
+ else if ('a' <= c && c <= 'z')
+ c -= 'a' - 26;
+ else if ('0' <= c && c <= '9')
+ c -= '0' - 52;
+ else
+ continue; /* garbage */
+ switch (pos++) {
+ case 0:
+ acc = (c << 2);
+ break;
+ case 1:
+ strbuf_addch(out, (acc | (c >> 4)));
+ acc = (c & 15) << 4;
+ break;
+ case 2:
+ strbuf_addch(out, (acc | (c >> 2)));
+ acc = (c & 3) << 6;
+ break;
+ case 3:
+ strbuf_addch(out, (acc | c));
+ acc = pos = 0;
+ break;
+ }
+ }
+ return out;
+}
+
+static int convert_to_utf8(struct mailinfo *mi,
+ struct strbuf *line, const char *charset)
+{
+ char *out;
+
+ if (!mi->metainfo_charset || !charset || !*charset)
+ return 0;
+
+ if (same_encoding(mi->metainfo_charset, charset))
+ return 0;
+ out = reencode_string(line->buf, mi->metainfo_charset, charset);
+ if (!out) {
+ mi->input_error = -1;
+ return error("cannot convert from %s to %s",
+ charset, mi->metainfo_charset);
+ }
+ strbuf_attach(line, out, strlen(out), strlen(out));
+ return 0;
+}
+
+static void decode_header(struct mailinfo *mi, struct strbuf *it)
+{
+ char *in, *ep, *cp;
+ struct strbuf outbuf = STRBUF_INIT, *dec;
+ struct strbuf charset_q = STRBUF_INIT, piecebuf = STRBUF_INIT;
+ int found_error = 1; /* pessimism */
+
+ in = it->buf;
+ while (in - it->buf <= it->len && (ep = strstr(in, "=?")) != NULL) {
+ int encoding;
+ strbuf_reset(&charset_q);
+ strbuf_reset(&piecebuf);
+
+ if (in != ep) {
+ /*
+ * We are about to process an encoded-word
+ * that begins at ep, but there is something
+ * before the encoded word.
+ */
+ char *scan;
+ for (scan = in; scan < ep; scan++)
+ if (!isspace(*scan))
+ break;
+
+ if (scan != ep || in == it->buf) {
+ /*
+ * We should not lose that "something",
+ * unless we have just processed an
+ * encoded-word, and there is only LWS
+ * before the one we are about to process.
+ */
+ strbuf_add(&outbuf, in, ep - in);
+ }
+ }
+ /* E.g.
+ * ep : "=?iso-2022-jp?B?GyR...?= foo"
+ * ep : "=?ISO-8859-1?Q?Foo=FCbar?= baz"
+ */
+ ep += 2;
+
+ if (ep - it->buf >= it->len || !(cp = strchr(ep, '?')))
+ goto release_return;
+
+ if (cp + 3 - it->buf > it->len)
+ goto release_return;
+ strbuf_add(&charset_q, ep, cp - ep);
+
+ encoding = cp[1];
+ if (!encoding || cp[2] != '?')
+ goto release_return;
+ ep = strstr(cp + 3, "?=");
+ if (!ep)
+ goto release_return;
+ strbuf_add(&piecebuf, cp + 3, ep - cp - 3);
+ switch (tolower(encoding)) {
+ default:
+ goto release_return;
+ case 'b':
+ dec = decode_b_segment(&piecebuf);
+ break;
+ case 'q':
+ dec = decode_q_segment(&piecebuf, 1);
+ break;
+ }
+ if (convert_to_utf8(mi, dec, charset_q.buf))
+ goto release_return;
+
+ strbuf_addbuf(&outbuf, dec);
+ strbuf_release(dec);
+ free(dec);
+ in = ep + 2;
+ }
+ strbuf_addstr(&outbuf, in);
+ strbuf_reset(it);
+ strbuf_addbuf(it, &outbuf);
+ found_error = 0;
+release_return:
+ strbuf_release(&outbuf);
+ strbuf_release(&charset_q);
+ strbuf_release(&piecebuf);
+
+ if (found_error)
+ mi->input_error = -1;
+}
+
+static int check_header(struct mailinfo *mi,
+ const struct strbuf *line,
+ struct strbuf *hdr_data[], int overwrite)
+{
+ int i, ret = 0, len;
+ struct strbuf sb = STRBUF_INIT;
+
+ /* search for the interesting parts */
+ for (i = 0; header[i]; i++) {
+ int len = strlen(header[i]);
+ if ((!hdr_data[i] || overwrite) && cmp_header(line, header[i])) {
+ /* Unwrap inline B and Q encoding, and optionally
+ * normalize the meta information to utf8.
+ */
+ strbuf_add(&sb, line->buf + len + 2, line->len - len - 2);
+ decode_header(mi, &sb);
+ handle_header(&hdr_data[i], &sb);
+ ret = 1;
+ goto check_header_out;
+ }
+ }
+
+ /* Content stuff */
+ if (cmp_header(line, "Content-Type")) {
+ len = strlen("Content-Type: ");
+ strbuf_add(&sb, line->buf + len, line->len - len);
+ decode_header(mi, &sb);
+ strbuf_insert(&sb, 0, "Content-Type: ", len);
+ handle_content_type(mi, &sb);
+ ret = 1;
+ goto check_header_out;
+ }
+ if (cmp_header(line, "Content-Transfer-Encoding")) {
+ len = strlen("Content-Transfer-Encoding: ");
+ strbuf_add(&sb, line->buf + len, line->len - len);
+ decode_header(mi, &sb);
+ handle_content_transfer_encoding(mi, &sb);
+ ret = 1;
+ goto check_header_out;
+ }
+ if (cmp_header(line, "Message-Id")) {
+ len = strlen("Message-Id: ");
+ strbuf_add(&sb, line->buf + len, line->len - len);
+ decode_header(mi, &sb);
+ if (mi->add_message_id)
+ mi->message_id = strbuf_detach(&sb, NULL);
+ ret = 1;
+ goto check_header_out;
+ }
+
+check_header_out:
+ strbuf_release(&sb);
+ return ret;
+}
+
+/*
+ * Returns 1 if the given line or any line beginning with the given line is an
+ * in-body header (that is, check_header will succeed when passed
+ * mi->s_hdr_data).
+ */
+static int is_inbody_header(const struct mailinfo *mi,
+ const struct strbuf *line)
+{
+ int i;
+ for (i = 0; header[i]; i++)
+ if (!mi->s_hdr_data[i] && cmp_header(line, header[i]))
+ return 1;
+ return 0;
+}
+
+static void decode_transfer_encoding(struct mailinfo *mi, struct strbuf *line)
+{
+ struct strbuf *ret;
+
+ switch (mi->transfer_encoding) {
+ case TE_QP:
+ ret = decode_q_segment(line, 0);
+ break;
+ case TE_BASE64:
+ ret = decode_b_segment(line);
+ break;
+ case TE_DONTCARE:
+ default:
+ return;
+ }
+ strbuf_reset(line);
+ strbuf_addbuf(line, ret);
+ strbuf_release(ret);
+ free(ret);
+}
+
+static inline int patchbreak(const struct strbuf *line)
+{
+ size_t i;
+
+ /* Beginning of a "diff -" header? */
+ if (starts_with(line->buf, "diff -"))
+ return 1;
+
+ /* CVS "Index: " line? */
+ if (starts_with(line->buf, "Index: "))
+ return 1;
+
+ /*
+ * "--- <filename>" starts patches without headers
+ * "---<sp>*" is a manual separator
+ */
+ if (line->len < 4)
+ return 0;
+
+ if (starts_with(line->buf, "---")) {
+ /* space followed by a filename? */
+ if (line->buf[3] == ' ' && !isspace(line->buf[4]))
+ return 1;
+ /* Just whitespace? */
+ for (i = 3; i < line->len; i++) {
+ unsigned char c = line->buf[i];
+ if (c == '\n')
+ return 1;
+ if (!isspace(c))
+ break;
+ }
+ return 0;
+ }
+ return 0;
+}
+
+static int is_scissors_line(const char *line)
+{
+ const char *c;
+ int scissors = 0, gap = 0;
+ const char *first_nonblank = NULL, *last_nonblank = NULL;
+ int visible, perforation = 0, in_perforation = 0;
+
+ for (c = line; *c; c++) {
+ if (isspace(*c)) {
+ if (in_perforation) {
+ perforation++;
+ gap++;
+ }
+ continue;
+ }
+ last_nonblank = c;
+ if (first_nonblank == NULL)
+ first_nonblank = c;
+ if (*c == '-') {
+ in_perforation = 1;
+ perforation++;
+ continue;
+ }
+ if ((!memcmp(c, ">8", 2) || !memcmp(c, "8<", 2) ||
+ !memcmp(c, ">%", 2) || !memcmp(c, "%<", 2))) {
+ in_perforation = 1;
+ perforation += 2;
+ scissors += 2;
+ c++;
+ continue;
+ }
+ in_perforation = 0;
+ }
+
+ /*
+ * The mark must be at least 8 bytes long (e.g. "-- >8 --").
+ * Even though there can be arbitrary cruft on the same line
+ * (e.g. "cut here"), in order to avoid misidentification, the
+ * perforation must occupy more than a third of the visible
+ * width of the line, and dashes and scissors must occupy more
+ * than half of the perforation.
+ */
+
+ if (first_nonblank && last_nonblank)
+ visible = last_nonblank - first_nonblank + 1;
+ else
+ visible = 0;
+ return (scissors && 8 <= visible &&
+ visible < perforation * 3 &&
+ gap * 2 < perforation);
+}
+
+static void flush_inbody_header_accum(struct mailinfo *mi)
+{
+ if (!mi->inbody_header_accum.len)
+ return;
+ if (!check_header(mi, &mi->inbody_header_accum, mi->s_hdr_data, 0))
+ BUG("inbody_header_accum, if not empty, must always contain a valid in-body header");
+ strbuf_reset(&mi->inbody_header_accum);
+}
+
+static int check_inbody_header(struct mailinfo *mi, const struct strbuf *line)
+{
+ if (mi->inbody_header_accum.len &&
+ (line->buf[0] == ' ' || line->buf[0] == '\t')) {
+ if (mi->use_scissors && is_scissors_line(line->buf)) {
+ /*
+ * This is a scissors line; do not consider this line
+ * as a header continuation line.
+ */
+ flush_inbody_header_accum(mi);
+ return 0;
+ }
+ strbuf_strip_suffix(&mi->inbody_header_accum, "\n");
+ strbuf_addbuf(&mi->inbody_header_accum, line);
+ return 1;
+ }
+
+ flush_inbody_header_accum(mi);
+
+ if (starts_with(line->buf, ">From") && isspace(line->buf[5]))
+ return is_format_patch_separator(line->buf + 1, line->len - 1);
+ if (starts_with(line->buf, "[PATCH]") && isspace(line->buf[7])) {
+ int i;
+ for (i = 0; header[i]; i++)
+ if (!strcmp("Subject", header[i])) {
+ handle_header(&mi->s_hdr_data[i], line);
+ return 1;
+ }
+ return 0;
+ }
+ if (is_inbody_header(mi, line)) {
+ strbuf_addbuf(&mi->inbody_header_accum, line);
+ return 1;
+ }
+ return 0;
+}
+
+static int handle_commit_msg(struct mailinfo *mi, struct strbuf *line)
+{
+ assert(!mi->filter_stage);
+
+ if (mi->header_stage) {
+ if (!line->len || (line->len == 1 && line->buf[0] == '\n')) {
+ if (mi->inbody_header_accum.len) {
+ flush_inbody_header_accum(mi);
+ mi->header_stage = 0;
+ }
+ return 0;
+ }
+ }
+
+ if (mi->use_inbody_headers && mi->header_stage) {
+ mi->header_stage = check_inbody_header(mi, line);
+ if (mi->header_stage)
+ return 0;
+ } else
+ /* Only trim the first (blank) line of the commit message
+ * when ignoring in-body headers.
+ */
+ mi->header_stage = 0;
+
+ /* normalize the log message to UTF-8. */
+ if (convert_to_utf8(mi, line, mi->charset.buf))
+ return 0; /* mi->input_error already set */
+
+ if (mi->use_scissors && is_scissors_line(line->buf)) {
+ int i;
+
+ strbuf_setlen(&mi->log_message, 0);
+ mi->header_stage = 1;
+
+ /*
+ * We may have already read "secondary headers"; purge
+ * them to give ourselves a clean restart.
+ */
+ for (i = 0; header[i]; i++) {
+ if (mi->s_hdr_data[i])
+ strbuf_release(mi->s_hdr_data[i]);
+ mi->s_hdr_data[i] = NULL;
+ }
+ return 0;
+ }
+
+ if (patchbreak(line)) {
+ if (mi->message_id)
+ strbuf_addf(&mi->log_message,
+ "Message-Id: %s\n", mi->message_id);
+ return 1;
+ }
+
+ strbuf_addbuf(&mi->log_message, line);
+ return 0;
+}
+
+static void handle_patch(struct mailinfo *mi, const struct strbuf *line)
+{
+ fwrite(line->buf, 1, line->len, mi->patchfile);
+ mi->patch_lines++;
+}
+
+static void handle_filter(struct mailinfo *mi, struct strbuf *line)
+{
+ switch (mi->filter_stage) {
+ case 0:
+ if (!handle_commit_msg(mi, line))
+ break;
+ mi->filter_stage++;
+ /* fallthrough */
+ case 1:
+ handle_patch(mi, line);
+ break;
+ }
+}
+
+static int is_rfc2822_header(const struct strbuf *line)
+{
+ /*
+ * The section that defines the loosest possible
+ * field name is "3.6.8 Optional fields".
+ *
+ * optional-field = field-name ":" unstructured CRLF
+ * field-name = 1*ftext
+ * ftext = %d33-57 / %59-126
+ */
+ int ch;
+ char *cp = line->buf;
+
+ /* Count mbox From headers as headers */
+ if (starts_with(cp, "From ") || starts_with(cp, ">From "))
+ return 1;
+
+ while ((ch = *cp++)) {
+ if (ch == ':')
+ return 1;
+ if ((33 <= ch && ch <= 57) ||
+ (59 <= ch && ch <= 126))
+ continue;
+ break;
+ }
+ return 0;
+}
+
+static int read_one_header_line(struct strbuf *line, FILE *in)
+{
+ struct strbuf continuation = STRBUF_INIT;
+
+ /* Get the first part of the line. */
+ if (strbuf_getline_lf(line, in))
+ return 0;
+
+ /*
+ * Is it an empty line or not a valid rfc2822 header?
+ * If so, stop here, and return false ("not a header")
+ */
+ strbuf_rtrim(line);
+ if (!line->len || !is_rfc2822_header(line)) {
+ /* Re-add the newline */
+ strbuf_addch(line, '\n');
+ return 0;
+ }
+
+ /*
+ * Now we need to eat all the continuation lines..
+ * Yuck, 2822 header "folding"
+ */
+ for (;;) {
+ int peek;
+
+ peek = fgetc(in);
+ if (peek == EOF)
+ break;
+ ungetc(peek, in);
+ if (peek != ' ' && peek != '\t')
+ break;
+ if (strbuf_getline_lf(&continuation, in))
+ break;
+ continuation.buf[0] = ' ';
+ strbuf_rtrim(&continuation);
+ strbuf_addbuf(line, &continuation);
+ }
+ strbuf_release(&continuation);
+
+ return 1;
+}
+
+static int find_boundary(struct mailinfo *mi, struct strbuf *line)
+{
+ while (!strbuf_getline_lf(line, mi->input)) {
+ if (*(mi->content_top) && is_multipart_boundary(mi, line))
+ return 1;
+ }
+ return 0;
+}
+
+static int handle_boundary(struct mailinfo *mi, struct strbuf *line)
+{
+ struct strbuf newline = STRBUF_INIT;
+
+ strbuf_addch(&newline, '\n');
+again:
+ if (line->len >= (*(mi->content_top))->len + 2 &&
+ !memcmp(line->buf + (*(mi->content_top))->len, "--", 2)) {
+ /* we hit an end boundary */
+ /* pop the current boundary off the stack */
+ strbuf_release(*(mi->content_top));
+ FREE_AND_NULL(*(mi->content_top));
+
+ /* technically won't happen as is_multipart_boundary()
+ will fail first. But just in case..
+ */
+ if (--mi->content_top < mi->content) {
+ error("Detected mismatched boundaries, can't recover");
+ mi->input_error = -1;
+ mi->content_top = mi->content;
+ strbuf_release(&newline);
+ return 0;
+ }
+ handle_filter(mi, &newline);
+ strbuf_release(&newline);
+ if (mi->input_error)
+ return 0;
+
+ /* skip to the next boundary */
+ if (!find_boundary(mi, line))
+ return 0;
+ goto again;
+ }
+
+ /* set some defaults */
+ mi->transfer_encoding = TE_DONTCARE;
+ strbuf_reset(&mi->charset);
+
+ /* slurp in this section's info */
+ while (read_one_header_line(line, mi->input))
+ check_header(mi, line, mi->p_hdr_data, 0);
+
+ strbuf_release(&newline);
+ /* replenish line */
+ if (strbuf_getline_lf(line, mi->input))
+ return 0;
+ strbuf_addch(line, '\n');
+ return 1;
+}
+
+static void handle_body(struct mailinfo *mi, struct strbuf *line)
+{
+ struct strbuf prev = STRBUF_INIT;
+
+ /* Skip up to the first boundary */
+ if (*(mi->content_top)) {
+ if (!find_boundary(mi, line))
+ goto handle_body_out;
+ }
+
+ do {
+ /* process any boundary lines */
+ if (*(mi->content_top) && is_multipart_boundary(mi, line)) {
+ /* flush any leftover */
+ if (prev.len) {
+ handle_filter(mi, &prev);
+ strbuf_reset(&prev);
+ }
+ if (!handle_boundary(mi, line))
+ goto handle_body_out;
+ }
+
+ /* Unwrap transfer encoding */
+ decode_transfer_encoding(mi, line);
+
+ switch (mi->transfer_encoding) {
+ case TE_BASE64:
+ case TE_QP:
+ {
+ struct strbuf **lines, **it, *sb;
+
+ /* Prepend any previous partial lines */
+ strbuf_insert(line, 0, prev.buf, prev.len);
+ strbuf_reset(&prev);
+
+ /*
+ * This is a decoded line that may contain
+ * multiple new lines. Pass only one chunk
+ * at a time to handle_filter()
+ */
+ lines = strbuf_split(line, '\n');
+ for (it = lines; (sb = *it); it++) {
+ if (*(it + 1) == NULL) /* The last line */
+ if (sb->buf[sb->len - 1] != '\n') {
+ /* Partial line, save it for later. */
+ strbuf_addbuf(&prev, sb);
+ break;
+ }
+ handle_filter(mi, sb);
+ }
+ /*
+ * The partial chunk is saved in "prev" and will be
+ * appended by the next iteration of read_line_with_nul().
+ */
+ strbuf_list_free(lines);
+ break;
+ }
+ default:
+ handle_filter(mi, line);
+ }
+
+ if (mi->input_error)
+ break;
+ } while (!strbuf_getwholeline(line, mi->input, '\n'));
+
+ flush_inbody_header_accum(mi);
+
+handle_body_out:
+ strbuf_release(&prev);
+}
+
+static void output_header_lines(FILE *fout, const char *hdr, const struct strbuf *data)
+{
+ const char *sp = data->buf;
+ while (1) {
+ char *ep = strchr(sp, '\n');
+ int len;
+ if (!ep)
+ len = strlen(sp);
+ else
+ len = ep - sp;
+ fprintf(fout, "%s: %.*s\n", hdr, len, sp);
+ if (!ep)
+ break;
+ sp = ep + 1;
+ }
+}
+
+static void handle_info(struct mailinfo *mi)
+{
+ struct strbuf *hdr;
+ int i;
+
+ for (i = 0; header[i]; i++) {
+ /* only print inbody headers if we output a patch file */
+ if (mi->patch_lines && mi->s_hdr_data[i])
+ hdr = mi->s_hdr_data[i];
+ else if (mi->p_hdr_data[i])
+ hdr = mi->p_hdr_data[i];
+ else
+ continue;
+
+ if (!strcmp(header[i], "Subject")) {
+ if (!mi->keep_subject) {
+ cleanup_subject(mi, hdr);
+ cleanup_space(hdr);
+ }
+ output_header_lines(mi->output, "Subject", hdr);
+ } else if (!strcmp(header[i], "From")) {
+ cleanup_space(hdr);
+ handle_from(mi, hdr);
+ fprintf(mi->output, "Author: %s\n", mi->name.buf);
+ fprintf(mi->output, "Email: %s\n", mi->email.buf);
+ } else {
+ cleanup_space(hdr);
+ fprintf(mi->output, "%s: %s\n", header[i], hdr->buf);
+ }
+ }
+ fprintf(mi->output, "\n");
+}
+
+int mailinfo(struct mailinfo *mi, const char *msg, const char *patch)
+{
+ FILE *cmitmsg;
+ int peek;
+ struct strbuf line = STRBUF_INIT;
+
+ cmitmsg = fopen(msg, "w");
+ if (!cmitmsg) {
+ perror(msg);
+ return -1;
+ }
+ mi->patchfile = fopen(patch, "w");
+ if (!mi->patchfile) {
+ perror(patch);
+ fclose(cmitmsg);
+ return -1;
+ }
+
+ mi->p_hdr_data = xcalloc(MAX_HDR_PARSED, sizeof(*(mi->p_hdr_data)));
+ mi->s_hdr_data = xcalloc(MAX_HDR_PARSED, sizeof(*(mi->s_hdr_data)));
+
+ do {
+ peek = fgetc(mi->input);
+ if (peek == EOF) {
+ fclose(cmitmsg);
+ return error("empty patch: '%s'", patch);
+ }
+ } while (isspace(peek));
+ ungetc(peek, mi->input);
+
+ /* process the email header */
+ while (read_one_header_line(&line, mi->input))
+ check_header(mi, &line, mi->p_hdr_data, 1);
+
+ handle_body(mi, &line);
+ fwrite(mi->log_message.buf, 1, mi->log_message.len, cmitmsg);
+ fclose(cmitmsg);
+ fclose(mi->patchfile);
+
+ handle_info(mi);
+ strbuf_release(&line);
+ return mi->input_error;
+}
+
+static int git_mailinfo_config(const char *var, const char *value, void *mi_)
+{
+ struct mailinfo *mi = mi_;
+
+ if (!starts_with(var, "mailinfo."))
+ return git_default_config(var, value, NULL);
+ if (!strcmp(var, "mailinfo.scissors")) {
+ mi->use_scissors = git_config_bool(var, value);
+ return 0;
+ }
+ /* perhaps others here */
+ return 0;
+}
+
+void setup_mailinfo(struct mailinfo *mi)
+{
+ memset(mi, 0, sizeof(*mi));
+ strbuf_init(&mi->name, 0);
+ strbuf_init(&mi->email, 0);
+ strbuf_init(&mi->charset, 0);
+ strbuf_init(&mi->log_message, 0);
+ strbuf_init(&mi->inbody_header_accum, 0);
+ mi->header_stage = 1;
+ mi->use_inbody_headers = 1;
+ mi->content_top = mi->content;
+ git_config(git_mailinfo_config, mi);
+}
+
+void clear_mailinfo(struct mailinfo *mi)
+{
+ int i;
+
+ strbuf_release(&mi->name);
+ strbuf_release(&mi->email);
+ strbuf_release(&mi->charset);
+ strbuf_release(&mi->inbody_header_accum);
+ free(mi->message_id);
+
+ if (mi->p_hdr_data)
+ for (i = 0; mi->p_hdr_data[i]; i++)
+ strbuf_release(mi->p_hdr_data[i]);
+ free(mi->p_hdr_data);
+ if (mi->s_hdr_data)
+ for (i = 0; mi->s_hdr_data[i]; i++)
+ strbuf_release(mi->s_hdr_data[i]);
+ free(mi->s_hdr_data);
+
+ while (mi->content < mi->content_top) {
+ free(*(mi->content_top));
+ mi->content_top--;
+ }
+
+ strbuf_release(&mi->log_message);
+}
diff --git a/t/t4256/1/patch b/t/t4256/1/patch
new file mode 100644
index 0000000000..bd0d8b0251
--- /dev/null
+++ b/t/t4256/1/patch
@@ -0,0 +1,129 @@
+From: A <author@example.com>
+Subject: [PATCH] mailinfo: support format=flowed
+Message-ID: <aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa@example.com>
+Date: Sat, 25 Aug 2018 22:04:50 +0200
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:60.0) Gecko/20100101
+ Thunderbird/60.0
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf-8; format=flowed
+Content-Language: en-US
+Content-Transfer-Encoding: 7bit
+
+---
+ mailinfo.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 62 insertions(+), 2 deletions(-)
+
+diff --git a/mailinfo.c b/mailinfo.c
+index 3281a37d51..b395adbdf2 100644
+--- a/mailinfo.c
++++ b/mailinfo.c
+@@ -237,11 +237,22 @@ static int slurp_attr(const char *line, const char
+*name, struct strbuf *attr)
+ return 1;
+ }
+
++static int has_attr_value(const char *line, const char *name, const
+char *value)
++{
++ struct strbuf sb = STRBUF_INIT;
++ int rc = slurp_attr(line, name, &sb) && !strcasecmp(sb.buf, value);
++ strbuf_release(&sb);
++ return rc;
++}
++
+ static void handle_content_type(struct mailinfo *mi, struct strbuf *line)
+ {
+ struct strbuf *boundary = xmalloc(sizeof(struct strbuf));
+ strbuf_init(boundary, line->len);
+
++ mi->format_flowed = has_attr_value(line->buf, "format=", "flowed");
++ mi->delsp = has_attr_value(line->buf, "delsp=", "yes");
++
+ if (slurp_attr(line->buf, "boundary=", boundary)) {
+ strbuf_insert(boundary, 0, "--", 2);
+ if (++mi->content_top >= &mi->content[MAX_BOUNDARIES]) {
+@@ -964,6 +975,52 @@ static int handle_boundary(struct mailinfo *mi,
+struct strbuf *line)
+ return 1;
+ }
+
++static void handle_filter_flowed(struct mailinfo *mi, struct strbuf *line,
++ struct strbuf *prev)
++{
++ size_t len = line->len;
++ const char *rest;
++
++ if (!mi->format_flowed) {
++ handle_filter(mi, line);
++ return;
++ }
++
++ if (line->buf[len - 1] == '\n') {
++ len--;
++ if (len && line->buf[len - 1] == '\r')
++ len--;
++ }
++
++ /* Keep signature separator as-is. */
++ if (skip_prefix(line->buf, "-- ", &rest) && rest - line->buf == len) {
++ if (prev->len) {
++ handle_filter(mi, prev);
++ strbuf_reset(prev);
++ }
++ handle_filter(mi, line);
++ return;
++ }
++
++ /* Unstuff space-stuffed line. */
++ if (len && line->buf[0] == ' ') {
++ strbuf_remove(line, 0, 1);
++ len--;
++ }
++
++ /* Save flowed line for later, but without the soft line break. */
++ if (len && line->buf[len - 1] == ' ') {
++ strbuf_add(prev, line->buf, len - !!mi->delsp);
++ return;
++ }
++
++ /* Prepend any previous partial lines */
++ strbuf_insert(line, 0, prev->buf, prev->len);
++ strbuf_reset(prev);
++
++ handle_filter(mi, line);
++}
++
+ static void handle_body(struct mailinfo *mi, struct strbuf *line)
+ {
+ struct strbuf prev = STRBUF_INIT;
+@@ -1012,7 +1069,7 @@ static void handle_body(struct mailinfo *mi,
+struct strbuf *line)
+ strbuf_addbuf(&prev, sb);
+ break;
+ }
+- handle_filter(mi, sb);
++ handle_filter_flowed(mi, sb, &prev);
+ }
+ /*
+ * The partial chunk is saved in "prev" and will be
+@@ -1022,13 +1079,16 @@ static void handle_body(struct mailinfo *mi,
+struct strbuf *line)
+ break;
+ }
+ default:
+- handle_filter(mi, line);
++ handle_filter_flowed(mi, line, &prev);
+ }
+
+ if (mi->input_error)
+ break;
+ } while (!strbuf_getwholeline(line, mi->input, '\n'));
+
++ if (prev.len)
++ handle_filter(mi, &prev);
++
+ flush_inbody_header_accum(mi);
+
+ handle_body_out:
+--
+2.18.0
diff --git a/t/t5303-pack-corruption-resilience.sh b/t/t5303-pack-corruption-resilience.sh
index 3634e258f8..41e6dc4dcf 100755
--- a/t/t5303-pack-corruption-resilience.sh
+++ b/t/t5303-pack-corruption-resilience.sh
@@ -311,4 +311,93 @@ test_expect_success \
test_must_fail git cat-file blob $blob_2 > /dev/null &&
test_must_fail git cat-file blob $blob_3 > /dev/null'
+# \0 - empty base
+# \1 - one byte in result
+# \1 - one literal byte (X)
+test_expect_success \
+ 'apply good minimal delta' \
+ 'printf "\0\1\1X" > minimal_delta &&
+ test-tool delta -p /dev/null minimal_delta /dev/null'
+
+# \0 - empty base
+# \1 - 1 byte in result
+# \2 - two literal bytes (one too many)
+test_expect_success \
+ 'apply delta with too many literal bytes' \
+ 'printf "\0\1\2XX" > too_big_literal &&
+ test_must_fail test-tool delta -p /dev/null too_big_literal /dev/null'
+
+# \4 - four bytes in base
+# \1 - one byte in result
+# \221 - copy, one byte offset, one byte size
+# \0 - copy from offset 0
+# \2 - copy two bytes (one too many)
+test_expect_success \
+ 'apply delta with too many copied bytes' \
+ 'printf "\4\1\221\0\2" > too_big_copy &&
+ printf base >base &&
+ test_must_fail test-tool delta -p base too_big_copy /dev/null'
+
+# \0 - empty base
+# \2 - two bytes in result
+# \2 - two literal bytes (we are short one)
+test_expect_success \
+ 'apply delta with too few literal bytes' \
+ 'printf "\0\2\2X" > truncated_delta &&
+ test_must_fail test-tool delta -p /dev/null truncated_delta /dev/null'
+
+# \0 - empty base
+# \1 - one byte in result
+# \221 - copy, one byte offset, one byte size
+# \0 - copy from offset 0
+# \1 - copy one byte (we are short one)
+test_expect_success \
+ 'apply delta with too few bytes in base' \
+ 'printf "\0\1\221\0\1" > truncated_base &&
+ test_must_fail test-tool delta -p /dev/null truncated_base /dev/null'
+
+# \4 - four bytes in base
+# \2 - two bytes in result
+# \1 - one literal byte (X)
+# \221 - copy, one byte offset, one byte size
+# (offset/size missing)
+#
+# Note that the literal byte is necessary to get past the uninteresting minimum
+# delta size check.
+test_expect_success \
+ 'apply delta with truncated copy parameters' \
+ 'printf "\4\2\1X\221" > truncated_copy_delta &&
+ printf base >base &&
+ test_must_fail test-tool delta -p base truncated_copy_delta /dev/null'
+
+# \0 - empty base
+# \1 - one byte in result
+# \1 - one literal byte (X)
+# \1 - trailing garbage command
+test_expect_success \
+ 'apply delta with trailing garbage literal' \
+ 'printf "\0\1\1X\1" > tail_garbage_literal &&
+ test_must_fail test-tool delta -p /dev/null tail_garbage_literal /dev/null'
+
+# \4 - four bytes in base
+# \1 - one byte in result
+# \1 - one literal byte (X)
+# \221 - copy, one byte offset, one byte size
+# \0 - copy from offset 0
+# \1 - copy 1 byte
+test_expect_success \
+ 'apply delta with trailing garbage copy' \
+ 'printf "\4\1\1X\221\0\1" > tail_garbage_copy &&
+ printf base >base &&
+ test_must_fail test-tool delta -p /dev/null tail_garbage_copy /dev/null'
+
+# \0 - empty base
+# \1 - one byte in result
+# \1 - one literal byte (X)
+# \0 - bogus opcode
+test_expect_success \
+ 'apply delta with trailing garbage opcode' \
+ 'printf "\0\1\1X\0" > tail_garbage_opcode &&
+ test_must_fail test-tool delta -p /dev/null tail_garbage_opcode /dev/null'
+
test_done
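
The printf payloads above follow the pack delta encoding: two size varints (base size, then result size) followed by commands, where a byte with the high bit set copies from the base and a low byte 1..127 inserts that many literal bytes. A small well-formed example in the same style, assuming test-tool is on PATH as it is inside the harness:

    # base size 4, result size 3; \221 = copy with 1-byte offset and 1-byte size,
    # offset 1, size 2 -> "as"; then insert one literal byte "X"
    printf base >base &&
    printf "\4\3\221\1\2X" >good_copy_delta &&
    test-tool delta -p base good_copy_delta out &&
    printf asX >expect &&
    test_cmp expect out
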
diff --git a/t/t5307-pack-missing-commit.sh b/t/t5307-pack-missing-commit.sh
index ae52a1882d..dacb440b27 100755
--- a/t/t5307-pack-missing-commit.sh
+++ b/t/t5307-pack-missing-commit.sh
@@ -24,11 +24,11 @@ test_expect_success 'check corruption' '
'
test_expect_success 'rev-list notices corruption (1)' '
- test_must_fail git rev-list HEAD
+ test_must_fail env GIT_TEST_COMMIT_GRAPH=0 git rev-list HEAD
'
test_expect_success 'rev-list notices corruption (2)' '
- test_must_fail git rev-list --objects HEAD
+ test_must_fail env GIT_TEST_COMMIT_GRAPH=0 git rev-list --objects HEAD
'
test_expect_success 'pack-objects notices corruption' '
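
GIT_TEST_COMMIT_GRAPH is the suite-wide knob that makes every test repository write and consult a commit-graph; these corruption tests pin it to 0 because a commit-graph would let rev-list answer without parsing the broken commit objects. To run a single script with the knob flipped on instead, something like:

    cd t &&
    GIT_TEST_COMMIT_GRAPH=1 ./t5307-pack-missing-commit.sh -v -i
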
diff --git a/t/t5310-pack-bitmaps.sh b/t/t5310-pack-bitmaps.sh
index 7bff7923f2..1be3459c5b 100755
--- a/t/t5310-pack-bitmaps.sh
+++ b/t/t5310-pack-bitmaps.sh
@@ -342,4 +342,97 @@ test_expect_success 'truncated bitmap fails gracefully' '
test_i18ngrep corrupt stderr
'
+# have_delta <obj> <expected_base>
+#
+# Note that because this relies on cat-file, it might find _any_ copy of an
+# object in the repository. The caller is responsible for making sure
+# there's only one (e.g., via "repack -ad", or having just fetched a copy).
+have_delta () {
+ echo $2 >expect &&
+ echo $1 | git cat-file --batch-check="%(deltabase)" >actual &&
+ test_cmp expect actual
+}
+
+# Create a state of history with these properties:
+#
+# - refs that allow a client to fetch some new history, while sharing some old
+# history with the server; we use branches delta-reuse-old and
+# delta-reuse-new here
+#
+# - the new history contains an object that is stored on the server as a delta
+# against a base that is in the old history
+#
+# - the base object is not immediately reachable from the tip of the old
+# history; finding it would involve digging down through history we know the
+# other side has
+#
+# This should result in a state where fetching from old->new would not
+# traditionally reuse the on-disk delta (because we'd have to dig to realize
+# that the client has it), but we will do so if bitmaps can tell us cheaply
+# that the other side has it.
+test_expect_success 'set up thin delta-reuse parent' '
+ # This first commit contains the buried base object.
+ test-tool genrandom delta 16384 >file &&
+ git add file &&
+ git commit -m "delta base" &&
+ base=$(git rev-parse --verify HEAD:file) &&
+
+ # These intermediate commits bury the base back in history.
+ # This becomes the "old" state.
+ for i in 1 2 3 4 5
+ do
+ echo $i >file &&
+ git commit -am "intermediate $i" || return 1
+ done &&
+ git branch delta-reuse-old &&
+
+ # And now our new history has a delta against the buried base. Note
+ # that this must be smaller than the original file, since pack-objects
+ # prefers to create deltas from smaller objects to larger.
+ test-tool genrandom delta 16300 >file &&
+ git commit -am "delta result" &&
+ delta=$(git rev-parse --verify HEAD:file) &&
+ git branch delta-reuse-new &&
+
+ # Repack with bitmaps and double check that we have the expected delta
+ # relationship.
+ git repack -adb &&
+ have_delta $delta $base
+'
+
+# Now we can sanity-check the non-bitmap behavior (that the server is not able
+# to reuse the delta). This isn't strictly something we care about, so this
+# test could be scrapped in the future. But it makes sure that the next test is
+# actually triggering the feature we want.
+#
+# Note that our tools for working with on-the-wire "thin" packs are limited. So
+# we actually perform the fetch, retain the resulting pack, and inspect the
+# result.
+test_expect_success 'fetch without bitmaps ignores delta against old base' '
+ test_config pack.usebitmaps false &&
+ test_when_finished "rm -rf client.git" &&
+ git init --bare client.git &&
+ (
+ cd client.git &&
+ git config transfer.unpackLimit 1 &&
+ git fetch .. delta-reuse-old:delta-reuse-old &&
+ git fetch .. delta-reuse-new:delta-reuse-new &&
+ have_delta $delta $ZERO_OID
+ )
+'
+
+# And do the same for the bitmap case, where we do expect to find the delta.
+test_expect_success 'fetch with bitmaps can reuse old base' '
+ test_config pack.usebitmaps true &&
+ test_when_finished "rm -rf client.git" &&
+ git init --bare client.git &&
+ (
+ cd client.git &&
+ git config transfer.unpackLimit 1 &&
+ git fetch .. delta-reuse-old:delta-reuse-old &&
+ git fetch .. delta-reuse-new:delta-reuse-new &&
+ have_delta $delta $base
+ )
+'
+
test_done
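
The have_delta helper leans on cat-file's %(deltabase) atom, which reports the on-disk delta base of whichever copy of the object cat-file finds, or the all-zero OID for an object stored whole. Checking it by hand, with HEAD:file standing in for any blob of interest:

    blob=$(git rev-parse HEAD:file) &&
    echo $blob | git cat-file --batch-check="%(deltabase)"
    # prints the base object id, or 0000...0000 if the blob is not a delta
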
diff --git a/t/t5319-multi-pack-index.sh b/t/t5319-multi-pack-index.sh
new file mode 100755
index 0000000000..6f56b38674
--- /dev/null
+++ b/t/t5319-multi-pack-index.sh
@@ -0,0 +1,217 @@
+#!/bin/sh
+
+test_description='multi-pack-indexes'
+. ./test-lib.sh
+
+objdir=.git/objects
+
+midx_read_expect () {
+ NUM_PACKS=$1
+ NUM_OBJECTS=$2
+ NUM_CHUNKS=$3
+ OBJECT_DIR=$4
+ EXTRA_CHUNKS="$5"
+ {
+ cat <<-EOF &&
+ header: 4d494458 1 $NUM_CHUNKS $NUM_PACKS
+ chunks: pack-names oid-fanout oid-lookup object-offsets$EXTRA_CHUNKS
+ num_objects: $NUM_OBJECTS
+ packs:
+ EOF
+ if test $NUM_PACKS -ge 1
+ then
+ ls $OBJECT_DIR/pack/ | grep idx | sort
+ fi &&
+ printf "object-dir: $OBJECT_DIR\n"
+ } >expect &&
+ test-tool read-midx $OBJECT_DIR >actual &&
+ test_cmp expect actual
+}
+
+test_expect_success 'write midx with no packs' '
+ test_when_finished rm -f pack/multi-pack-index &&
+ git multi-pack-index --object-dir=. write &&
+ midx_read_expect 0 0 4 .
+'
+
+generate_objects () {
+ i=$1
+ iii=$(printf '%03i' $i)
+ {
+ test-tool genrandom "bar" 200 &&
+ test-tool genrandom "baz $iii" 50
+ } >wide_delta_$iii &&
+ {
+ test-tool genrandom "foo"$i 100 &&
+ test-tool genrandom "foo"$(( $i + 1 )) 100 &&
+ test-tool genrandom "foo"$(( $i + 2 )) 100
+ } >deep_delta_$iii &&
+ {
+ echo $iii &&
+ test-tool genrandom "$iii" 8192
+ } >file_$iii &&
+ git update-index --add file_$iii deep_delta_$iii wide_delta_$iii
+}
+
+commit_and_list_objects () {
+ {
+ echo 101 &&
+ test-tool genrandom 100 8192;
+ } >file_101 &&
+ git update-index --add file_101 &&
+ tree=$(git write-tree) &&
+ commit=$(git commit-tree $tree -p HEAD</dev/null) &&
+ {
+ echo $tree &&
+ git ls-tree $tree | sed -e "s/.* \\([0-9a-f]*\\) .*/\\1/"
+ } >obj-list &&
+ git reset --hard $commit
+}
+
+test_expect_success 'create objects' '
+ test_commit initial &&
+ for i in $(test_seq 1 5)
+ do
+ generate_objects $i
+ done &&
+ commit_and_list_objects
+'
+
+test_expect_success 'write midx with one v1 pack' '
+ pack=$(git pack-objects --index-version=1 $objdir/pack/test <obj-list) &&
+ test_when_finished rm $objdir/pack/test-$pack.pack \
+ $objdir/pack/test-$pack.idx $objdir/pack/multi-pack-index &&
+ git multi-pack-index --object-dir=$objdir write &&
+ midx_read_expect 1 18 4 $objdir
+'
+
+midx_git_two_modes () {
+ if [ "$2" = "sorted" ]
+ then
+ git -c core.multiPackIndex=false $1 | sort >expect &&
+ git -c core.multiPackIndex=true $1 | sort >actual
+ else
+ git -c core.multiPackIndex=false $1 >expect &&
+ git -c core.multiPackIndex=true $1 >actual
+ fi &&
+ test_cmp expect actual
+}
+
+compare_results_with_midx () {
+ MSG=$1
+ test_expect_success "check normal git operations: $MSG" '
+ midx_git_two_modes "rev-list --objects --all" &&
+ midx_git_two_modes "log --raw" &&
+ midx_git_two_modes "count-objects --verbose" &&
+ midx_git_two_modes "cat-file --batch-all-objects --buffer --batch-check" &&
+ midx_git_two_modes "cat-file --batch-all-objects --buffer --batch-check --unsorted" sorted
+ '
+}
+
+test_expect_success 'write midx with one v2 pack' '
+ git pack-objects --index-version=2,0x40 $objdir/pack/test <obj-list &&
+ git multi-pack-index --object-dir=$objdir write &&
+ midx_read_expect 1 18 4 $objdir
+'
+
+compare_results_with_midx "one v2 pack"
+
+test_expect_success 'add more objects' '
+ for i in $(test_seq 6 10)
+ do
+ generate_objects $i
+ done &&
+ commit_and_list_objects
+'
+
+test_expect_success 'write midx with two packs' '
+ git pack-objects --index-version=1 $objdir/pack/test-2 <obj-list &&
+ git multi-pack-index --object-dir=$objdir write &&
+ midx_read_expect 2 34 4 $objdir
+'
+
+compare_results_with_midx "two packs"
+
+test_expect_success 'add more packs' '
+ for j in $(test_seq 11 20)
+ do
+ generate_objects $j &&
+ commit_and_list_objects &&
+ git pack-objects --index-version=2 $objdir/pack/test-pack <obj-list
+ done
+'
+
+compare_results_with_midx "mixed mode (two packs + extra)"
+
+test_expect_success 'write midx with twelve packs' '
+ git multi-pack-index --object-dir=$objdir write &&
+ midx_read_expect 12 74 4 $objdir
+'
+
+compare_results_with_midx "twelve packs"
+
+test_expect_success 'repack removes multi-pack-index' '
+ test_path_is_file $objdir/pack/multi-pack-index &&
+ git repack -adf &&
+ test_path_is_missing $objdir/pack/multi-pack-index
+'
+
+compare_results_with_midx "after repack"
+
+test_expect_success 'multi-pack-index and pack-bitmap' '
+ git -c repack.writeBitmaps=true repack -ad &&
+ git multi-pack-index write &&
+ git rev-list --test-bitmap HEAD
+'
+
+test_expect_success 'multi-pack-index and alternates' '
+ git init --bare alt.git &&
+ echo $(pwd)/alt.git/objects >.git/objects/info/alternates &&
+ echo content1 >file1 &&
+ altblob=$(GIT_DIR=alt.git git hash-object -w file1) &&
+ git cat-file blob $altblob &&
+ git rev-list --all
+'
+
+compare_results_with_midx "with alternate (local midx)"
+
+test_expect_success 'multi-pack-index in an alternate' '
+ mv .git/objects/pack/* alt.git/objects/pack &&
+ test_commit add_local_objects &&
+ git repack --local &&
+ git multi-pack-index write &&
+ midx_read_expect 1 3 4 $objdir &&
+ git reset --hard HEAD~1 &&
+ rm -f .git/objects/pack/*
+'
+
+compare_results_with_midx "with alternate (remote midx)"
+
+
+# usage: corrupt_data <file> <pos> [<data>]
+corrupt_data () {
+ file=$1
+ pos=$2
+ data="${3:-\0}"
+ printf "$data" | dd of="$file" bs=1 seek="$pos" conv=notrunc
+}
+
+# Force 64-bit offsets by manipulating the idx file.
+# This makes the IDX file _incorrect_ so be careful to clean up after!
+test_expect_success 'force some 64-bit offsets with pack-objects' '
+ mkdir objects64 &&
+ mkdir objects64/pack &&
+ for i in $(test_seq 1 11)
+ do
+ generate_objects 11
+ done &&
+ commit_and_list_objects &&
+ pack64=$(git pack-objects --index-version=2,0x40 objects64/pack/test-64 <obj-list) &&
+ idx64=objects64/pack/test-64-$pack64.idx &&
+ chmod u+w $idx64 &&
+ corrupt_data $idx64 2999 "\02" &&
+ midx64=$(git multi-pack-index --object-dir=objects64 write) &&
+ midx_read_expect 1 63 5 objects64 " large-offsets"
+'
+
+test_done
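
The 4d494458 that midx_read_expect checks is simply the file signature "MIDX" in hex, followed by the version, chunk count and pack count. A quick sketch of producing and dumping a multi-pack-index outside the helper, using the same tools the test relies on:

    printf MIDX | od -An -tx1          # -> 4d 49 44 58
    git multi-pack-index --object-dir=.git/objects write &&
    test-tool read-midx .git/objects   # header, chunk names, object count, pack list
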
diff --git a/t/t5320-delta-islands.sh b/t/t5320-delta-islands.sh
new file mode 100755
index 0000000000..fea92a5777
--- /dev/null
+++ b/t/t5320-delta-islands.sh
@@ -0,0 +1,143 @@
+#!/bin/sh
+
+test_description='exercise delta islands'
+. ./test-lib.sh
+
+# returns true iff $1 is a delta based on $2
+is_delta_base () {
+ delta_base=$(echo "$1" | git cat-file --batch-check='%(deltabase)') &&
+ echo >&2 "$1 has base $delta_base" &&
+ test "$delta_base" = "$2"
+}
+
+# generate a commit on branch $1 with a single file, "file", whose
+# content is mostly based on the seed $2, but with a unique bit
+# of content $3 appended. This should allow us to see whether
+# blobs of different refs delta against each other.
+commit() {
+ blob=$({ test-tool genrandom "$2" 10240 && echo "$3"; } |
+ git hash-object -w --stdin) &&
+ tree=$(printf '100644 blob %s\tfile\n' "$blob" | git mktree) &&
+ commit=$(echo "$2-$3" | git commit-tree "$tree" ${4:+-p "$4"}) &&
+ git update-ref "refs/heads/$1" "$commit" &&
+ eval "$1"'=$(git rev-parse $1:file)' &&
+ eval "echo >&2 $1=\$$1"
+}
+
+test_expect_success 'setup commits' '
+ commit one seed 1 &&
+ commit two seed 12
+'
+
+# Note: This is heavily dependent on the "prefer larger objects as base"
+# heuristic.
+test_expect_success 'vanilla repack deltas one against two' '
+ git repack -adf &&
+ is_delta_base $one $two
+'
+
+test_expect_success 'island repack with no island definition is vanilla' '
+ git repack -adfi &&
+ is_delta_base $one $two
+'
+
+test_expect_success 'island repack with no matches is vanilla' '
+ git -c "pack.island=refs/foo" repack -adfi &&
+ is_delta_base $one $two
+'
+
+test_expect_success 'separate islands disallows delta' '
+ git -c "pack.island=refs/heads/(.*)" repack -adfi &&
+ ! is_delta_base $one $two &&
+ ! is_delta_base $two $one
+'
+
+test_expect_success 'same island allows delta' '
+ git -c "pack.island=refs/heads" repack -adfi &&
+ is_delta_base $one $two
+'
+
+test_expect_success 'coalesce same-named islands' '
+ git \
+ -c "pack.island=refs/(.*)/one" \
+ -c "pack.island=refs/(.*)/two" \
+ repack -adfi &&
+ is_delta_base $one $two
+'
+
+test_expect_success 'island restrictions drop reused deltas' '
+ git repack -adfi &&
+ is_delta_base $one $two &&
+ git -c "pack.island=refs/heads/(.*)" repack -adi &&
+ ! is_delta_base $one $two &&
+ ! is_delta_base $two $one
+'
+
+test_expect_success 'island regexes are left-anchored' '
+ git -c "pack.island=heads/(.*)" repack -adfi &&
+ is_delta_base $one $two
+'
+
+test_expect_success 'island regexes follow last-one-wins scheme' '
+ git \
+ -c "pack.island=refs/heads/(.*)" \
+ -c "pack.island=refs/heads/" \
+ repack -adfi &&
+ is_delta_base $one $two
+'
+
+test_expect_success 'setup shared history' '
+ commit root shared root &&
+ commit one shared 1 root &&
+ commit two shared 12-long root
+'
+
+# We know that $two will be preferred as a base from $one,
+# because we can transform it with a pure deletion.
+#
+# We also expect $root as a delta against $two by the "longest is base" rule.
+test_expect_success 'vanilla delta goes between branches' '
+ git repack -adf &&
+ is_delta_base $one $two &&
+ is_delta_base $root $two
+'
+
+# Here we should allow $one to base itself on $root; even though
+# they are in different islands, the objects in $root are in a superset
+# of islands compared to those in $one.
+#
+# Similarly, $two can delta against $root by our rules. And unlike $one,
+# in which we are just allowing it, the island rules actually put $root
+# as a possible base for $two, which it would not otherwise be (due to the size
+# sorting).
+test_expect_success 'deltas allowed against superset islands' '
+ git -c "pack.island=refs/heads/(.*)" repack -adfi &&
+ is_delta_base $one $root &&
+ is_delta_base $two $root
+'
+
+# We are going to test the packfile order here, so we again have to make some
+# assumptions. We assume that "$root", as part of our core "one", must come
+# before "$two". This should be guaranteed by the island code. However, for
+# this test to fail without islands, we are also assuming that it would not
+# otherwise do so. This is true by the current write order, which will put
+# commits (and their contents) before their parents.
+test_expect_success 'island core places core objects first' '
+ cat >expect <<-EOF &&
+ $root
+ $two
+ EOF
+ git -c "pack.island=refs/heads/(.*)" \
+ -c "pack.islandcore=one" \
+ repack -adfi &&
+ git verify-pack -v .git/objects/pack/*.pack |
+ cut -d" " -f1 |
+ egrep "$root|$two" >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'unmatched island core is not fatal' '
+ git -c "pack.islandcore=one" repack -adfi
+'
+
+test_done
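
Outside of one-shot -c options, the same island setup can live in the repository configuration: pack.island gives a left-anchored regex whose capture groups name islands, and pack.islandCore names one island whose objects are written first. A sketch mirroring the tests above ("one" being the island captured from refs/heads/one):

    git config --add pack.island "refs/heads/(.*)" &&
    git config pack.islandCore one &&
    git repack -adfi &&
    git verify-pack -v .git/objects/pack/*.pack | head
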
diff --git a/t/t5516-fetch-push.sh b/t/t5516-fetch-push.sh
index 539c25aada..7a8f56db53 100755
--- a/t/t5516-fetch-push.sh
+++ b/t/t5516-fetch-push.sh
@@ -969,7 +969,7 @@ test_force_push_tag () {
tag_type_description=$1
tag_args=$2
- test_expect_success 'force pushing required to update lightweight tag' "
+ test_expect_success "force pushing required to update $tag_type_description" "
mk_test testrepo heads/master &&
mk_child testrepo child1 &&
mk_child testrepo child2 &&
@@ -1009,7 +1009,32 @@ test_force_push_tag () {
}
test_force_push_tag "lightweight tag" "-f"
-test_force_push_tag "annotated tag" "-f -a -m'msg'"
+test_force_push_tag "annotated tag" "-f -a -m'tag message'"
+
+test_force_fetch_tag () {
+ tag_type_description=$1
+ tag_args=$2
+
+ test_expect_success "fetch will not clobber an existing $tag_type_description without --force" "
+ mk_test testrepo heads/master &&
+ mk_child testrepo child1 &&
+ mk_child testrepo child2 &&
+ (
+ cd testrepo &&
+ git tag testTag &&
+ git -C ../child1 fetch origin tag testTag &&
+ >file1 &&
+ git add file1 &&
+ git commit -m 'file1' &&
+ git tag $tag_args testTag &&
+ test_must_fail git -C ../child1 fetch origin tag testTag &&
+ git -C ../child1 fetch origin '+refs/tags/*:refs/tags/*'
+ )
+ "
+}
+
+test_force_fetch_tag "lightweight tag" "-f"
+test_force_fetch_tag "annotated tag" "-f -a -m'tag message'"
test_expect_success 'push --porcelain' '
mk_empty testrepo &&
diff --git a/t/t5562-http-backend-content-length.sh b/t/t5562-http-backend-content-length.sh
index f94d01f69e..b24d8b05a4 100755
--- a/t/t5562-http-backend-content-length.sh
+++ b/t/t5562-http-backend-content-length.sh
@@ -155,8 +155,8 @@ test_expect_success 'CONTENT_LENGTH overflow ssize_t' '
test_expect_success 'empty CONTENT_LENGTH' '
env \
- QUERY_STRING=/repo.git/HEAD \
- PATH_TRANSLATED="$PWD"/.git/HEAD \
+ QUERY_STRING="service=git-receive-pack" \
+ PATH_TRANSLATED="$PWD"/.git/info/refs \
GIT_HTTP_EXPORT_ALL=TRUE \
REQUEST_METHOD=GET \
CONTENT_LENGTH="" \
diff --git a/t/t5601-clone.sh b/t/t5601-clone.sh
index ddaa96ac4f..f1a49e94f5 100755
--- a/t/t5601-clone.sh
+++ b/t/t5601-clone.sh
@@ -624,10 +624,16 @@ test_expect_success 'clone on case-insensitive fs' '
git hash-object -w -t tree --stdin) &&
c=$(git commit-tree -m bogus $t) &&
git update-ref refs/heads/bogus $c &&
- git clone -b bogus . bogus
+ git clone -b bogus . bogus 2>warning
)
'
+test_expect_success !MINGW,!CYGWIN,CASE_INSENSITIVE_FS 'colliding file detection' '
+ grep X icasefs/warning &&
+ grep x icasefs/warning &&
+ test_i18ngrep "the following paths have collided" icasefs/warning
+'
+
partial_clone () {
SERVER="$1" &&
URL="$2" &&
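
The new warning fires when a tree that legitimately contains paths differing only in case (created on a case-sensitive filesystem) is checked out onto a case-insensitive one, so only one file from each colliding group survives. A sketch of what a user would see, with a purely illustrative URL:

    git clone https://example.com/repo-with-X-and-x.git collide 2>warning
    grep "the following paths have collided" warning
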
diff --git a/t/t5612-clone-refspec.sh b/t/t5612-clone-refspec.sh
index 5582b3d5fd..e36ac01661 100755
--- a/t/t5612-clone-refspec.sh
+++ b/t/t5612-clone-refspec.sh
@@ -103,7 +103,7 @@ test_expect_success 'clone with --no-tags' '
test_expect_success '--single-branch while HEAD pointing at master' '
(
cd dir_master &&
- git fetch &&
+ git fetch --force &&
git for-each-ref refs/remotes/origin |
sed -e "/HEAD$/d" \
-e "s|/remotes/origin/|/heads/|" >../actual
@@ -114,7 +114,7 @@ test_expect_success '--single-branch while HEAD pointing at master' '
test_cmp expect actual &&
(
cd dir_master &&
- git fetch --tags &&
+ git fetch --tags --force &&
git for-each-ref refs/tags >../actual
) &&
git for-each-ref refs/tags >expect &&
diff --git a/t/t6011-rev-list-with-bad-commit.sh b/t/t6011-rev-list-with-bad-commit.sh
index e51eb41f4b..545b461e51 100755
--- a/t/t6011-rev-list-with-bad-commit.sh
+++ b/t/t6011-rev-list-with-bad-commit.sh
@@ -41,10 +41,9 @@ test_expect_success 'corrupt second commit object' \
test_must_fail git fsck --full
'
-test_expect_success 'rev-list should fail' \
- '
- test_must_fail git rev-list --all > /dev/null
- '
+test_expect_success 'rev-list should fail' '
+ test_must_fail env GIT_TEST_COMMIT_GRAPH=0 git rev-list --all > /dev/null
+'
test_expect_success 'git repack _MUST_ fail' \
'
diff --git a/t/t6018-rev-list-glob.sh b/t/t6018-rev-list-glob.sh
index 0bf10d0686..db8a7834d8 100755
--- a/t/t6018-rev-list-glob.sh
+++ b/t/t6018-rev-list-glob.sh
@@ -255,7 +255,7 @@ test_expect_success 'rev-list accumulates multiple --exclude' '
compare rev-list "--exclude=refs/remotes/* --exclude=refs/tags/* --all" --branches
'
-test_expect_failure 'rev-list should succeed with empty output on empty stdin' '
+test_expect_success 'rev-list should succeed with empty output on empty stdin' '
git rev-list --stdin </dev/null >actual &&
test_must_be_empty actual
'
diff --git a/t/t6024-recursive-merge.sh b/t/t6024-recursive-merge.sh
index 3f59e58dfb..27c7de90ce 100755
--- a/t/t6024-recursive-merge.sh
+++ b/t/t6024-recursive-merge.sh
@@ -60,9 +60,9 @@ git update-index a1 &&
GIT_AUTHOR_DATE="2006-12-12 23:00:08" git commit -m F
'
-test_expect_success "combined merge conflicts" "
- test_must_fail git merge -m final G
-"
+test_expect_success 'combined merge conflicts' '
+ test_must_fail env GIT_TEST_COMMIT_GRAPH=0 git merge -m final G
+'
cat > expect << EOF
<<<<<<< HEAD
diff --git a/t/t6135-pathspec-with-attrs.sh b/t/t6135-pathspec-with-attrs.sh
index 77b8cef661..e436a73962 100755
--- a/t/t6135-pathspec-with-attrs.sh
+++ b/t/t6135-pathspec-with-attrs.sh
@@ -166,7 +166,7 @@ test_expect_success 'fail if attr magic is used places not implemented' '
# though, but git-add is convenient as it has its own internal pathspec
# parsing.
test_must_fail git add ":(attr:labelB)" 2>actual &&
- test_i18ngrep "unsupported magic" actual
+ test_i18ngrep "magic not supported" actual
'
test_expect_success 'abort on giving invalid label on the command line' '
diff --git a/t/t6300-for-each-ref.sh b/t/t6300-for-each-ref.sh
index 024f8c06f7..97bfbee6e8 100755
--- a/t/t6300-for-each-ref.sh
+++ b/t/t6300-for-each-ref.sh
@@ -715,6 +715,29 @@ test_expect_success 'basic atom: head contents:trailers' '
test_cmp expect actual.clean
'
+test_expect_success 'trailer parsing not fooled by --- line' '
+ git commit --allow-empty -F - <<-\EOF &&
+ this is the subject
+
+ This is the body. The message has a "---" line which would confuse a
+ message+patch parser. But here we know we have only a commit message,
+ so we get it right.
+
+ trailer: wrong
+ ---
+ This is more body.
+
+ trailer: right
+ EOF
+
+ {
+ echo "trailer: right" &&
+ echo
+ } >expect &&
+ git for-each-ref --format="%(trailers)" refs/heads/master >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'Add symbolic ref for the following tests' '
git symbolic-ref refs/heads/sym refs/heads/master
'
diff --git a/t/t6600-test-reach.sh b/t/t6600-test-reach.sh
new file mode 100755
index 0000000000..ae94b27f70
--- /dev/null
+++ b/t/t6600-test-reach.sh
@@ -0,0 +1,268 @@
+#!/bin/sh
+
+test_description='basic commit reachability tests'
+
+. ./test-lib.sh
+
+# Construct a grid-like commit graph with points (x,y)
+# with 1 <= x <= 10, 1 <= y <= 10, where (x,y) has
+# parents (x-1, y) and (x, y-1), keeping in mind that
+# we drop a parent if a coordinate is nonpositive.
+#
+#             (10,10)
+#            /       \
+#        (10,9)     (9,10)
+#       /      \    /     \
+#   (10,8)    (9,9)    (8,10)
+#  /      \   /    \   /     \
+# ( continued...)
+#  \      /   \    /   \     /
+#   (3,1)    (2,2)    (1,3)
+#       \    /    \    /
+#       (2,1)    (1,2)
+#            \    /
+#            (1,1)
+#
+# We use branch 'commit-x-y' to refer to (x,y).
+# This grid allows interesting reachability and
+# non-reachability queries: (x,y) can reach (x',y')
+# if and only if x' <= x and y' <= y.
+test_expect_success 'setup' '
+ for i in $(test_seq 1 10)
+ do
+ test_commit "1-$i" &&
+ git branch -f commit-1-$i &&
+ git tag -a -m "1-$i" tag-1-$i commit-1-$i
+ done &&
+ for j in $(test_seq 1 9)
+ do
+ git reset --hard commit-$j-1 &&
+ x=$(($j + 1)) &&
+ test_commit "$x-1" &&
+ git branch -f commit-$x-1 &&
+ git tag -a -m "$x-1" tag-$x-1 commit-$x-1 &&
+
+ for i in $(test_seq 2 10)
+ do
+ git merge commit-$j-$i -m "$x-$i" &&
+ git branch -f commit-$x-$i &&
+ git tag -a -m "$x-$i" tag-$x-$i commit-$x-$i
+ done
+ done &&
+ git commit-graph write --reachable &&
+ mv .git/objects/info/commit-graph commit-graph-full &&
+ git show-ref -s commit-5-5 | git commit-graph write --stdin-commits &&
+ mv .git/objects/info/commit-graph commit-graph-half &&
+ git config core.commitGraph true
+'
+
+test_three_modes () {
+ test_when_finished rm -rf .git/objects/info/commit-graph &&
+ test-tool reach $1 <input >actual &&
+ test_cmp expect actual &&
+ cp commit-graph-full .git/objects/info/commit-graph &&
+ test-tool reach $1 <input >actual &&
+ test_cmp expect actual &&
+ cp commit-graph-half .git/objects/info/commit-graph &&
+ test-tool reach $1 <input >actual &&
+ test_cmp expect actual
+}
+
+test_expect_success 'ref_newer:miss' '
+ cat >input <<-\EOF &&
+ A:commit-5-7
+ B:commit-4-9
+ EOF
+ echo "ref_newer(A,B):0" >expect &&
+ test_three_modes ref_newer
+'
+
+test_expect_success 'ref_newer:hit' '
+ cat >input <<-\EOF &&
+ A:commit-5-7
+ B:commit-2-3
+ EOF
+ echo "ref_newer(A,B):1" >expect &&
+ test_three_modes ref_newer
+'
+
+test_expect_success 'in_merge_bases:hit' '
+ cat >input <<-\EOF &&
+ A:commit-5-7
+ B:commit-8-8
+ EOF
+ echo "in_merge_bases(A,B):1" >expect &&
+ test_three_modes in_merge_bases
+'
+
+test_expect_success 'in_merge_bases:miss' '
+ cat >input <<-\EOF &&
+ A:commit-6-8
+ B:commit-5-9
+ EOF
+ echo "in_merge_bases(A,B):0" >expect &&
+ test_three_modes in_merge_bases
+'
+
+test_expect_success 'is_descendant_of:hit' '
+ cat >input <<-\EOF &&
+ A:commit-5-7
+ X:commit-4-8
+ X:commit-6-6
+ X:commit-1-1
+ EOF
+ echo "is_descendant_of(A,X):1" >expect &&
+ test_three_modes is_descendant_of
+'
+
+test_expect_success 'is_descendant_of:miss' '
+ cat >input <<-\EOF &&
+ A:commit-6-8
+ X:commit-5-9
+ X:commit-4-10
+ X:commit-7-6
+ EOF
+ echo "is_descendant_of(A,X):0" >expect &&
+ test_three_modes is_descendant_of
+'
+
+test_expect_success 'get_merge_bases_many' '
+ cat >input <<-\EOF &&
+ A:commit-5-7
+ X:commit-4-8
+ X:commit-6-6
+ X:commit-8-3
+ EOF
+ {
+ echo "get_merge_bases_many(A,X):" &&
+ git rev-parse commit-5-6 \
+ commit-4-7 | sort
+ } >expect &&
+ test_three_modes get_merge_bases_many
+'
+
+test_expect_success 'reduce_heads' '
+ cat >input <<-\EOF &&
+ X:commit-1-10
+ X:commit-2-8
+ X:commit-3-6
+ X:commit-4-4
+ X:commit-1-7
+ X:commit-2-5
+ X:commit-3-3
+ X:commit-5-1
+ EOF
+ {
+ echo "reduce_heads(X):" &&
+ git rev-parse commit-5-1 \
+ commit-4-4 \
+ commit-3-6 \
+ commit-2-8 \
+ commit-1-10 | sort
+ } >expect &&
+ test_three_modes reduce_heads
+'
+
+test_expect_success 'can_all_from_reach:hit' '
+ cat >input <<-\EOF &&
+ X:commit-2-10
+ X:commit-3-9
+ X:commit-4-8
+ X:commit-5-7
+ X:commit-6-6
+ X:commit-7-5
+ X:commit-8-4
+ X:commit-9-3
+ Y:commit-1-9
+ Y:commit-2-8
+ Y:commit-3-7
+ Y:commit-4-6
+ Y:commit-5-5
+ Y:commit-6-4
+ Y:commit-7-3
+ Y:commit-8-1
+ EOF
+ echo "can_all_from_reach(X,Y):1" >expect &&
+ test_three_modes can_all_from_reach
+'
+
+test_expect_success 'can_all_from_reach:miss' '
+ cat >input <<-\EOF &&
+ X:commit-2-10
+ X:commit-3-9
+ X:commit-4-8
+ X:commit-5-7
+ X:commit-6-6
+ X:commit-7-5
+ X:commit-8-4
+ X:commit-9-3
+ Y:commit-1-9
+ Y:commit-2-8
+ Y:commit-3-7
+ Y:commit-4-6
+ Y:commit-5-5
+ Y:commit-6-4
+ Y:commit-8-5
+ EOF
+ echo "can_all_from_reach(X,Y):0" >expect &&
+ test_three_modes can_all_from_reach
+'
+
+test_expect_success 'can_all_from_reach_with_flag: tags case' '
+ cat >input <<-\EOF &&
+ X:tag-2-10
+ X:tag-3-9
+ X:tag-4-8
+ X:commit-5-7
+ X:commit-6-6
+ X:commit-7-5
+ X:commit-8-4
+ X:commit-9-3
+ Y:tag-1-9
+ Y:tag-2-8
+ Y:tag-3-7
+ Y:commit-4-6
+ Y:commit-5-5
+ Y:commit-6-4
+ Y:commit-7-3
+ Y:commit-8-1
+ EOF
+ echo "can_all_from_reach_with_flag(X,_,_,0,0):1" >expect &&
+ test_three_modes can_all_from_reach_with_flag
+'
+
+test_expect_success 'commit_contains:hit' '
+ cat >input <<-\EOF &&
+ A:commit-7-7
+ X:commit-2-10
+ X:commit-3-9
+ X:commit-4-8
+ X:commit-5-7
+ X:commit-6-6
+ X:commit-7-5
+ X:commit-8-4
+ X:commit-9-3
+ EOF
+ echo "commit_contains(_,A,X,_):1" >expect &&
+ test_three_modes commit_contains &&
+ test_three_modes commit_contains --tag
+'
+
+test_expect_success 'commit_contains:miss' '
+ cat >input <<-\EOF &&
+ A:commit-6-5
+ X:commit-2-10
+ X:commit-3-9
+ X:commit-4-8
+ X:commit-5-7
+ X:commit-6-6
+ X:commit-7-5
+ X:commit-8-4
+ X:commit-9-3
+ EOF
+ echo "commit_contains(_,A,X,_):0" >expect &&
+ test_three_modes commit_contains &&
+ test_three_modes commit_contains --tag
+'
+
+test_done
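
All of these queries reduce to the grid's rule that (x,y) reaches (x',y') iff x' <= x and y' <= y, which can also be spot-checked with stock commands; for instance, mirroring the ref_newer hit and miss cases above:

    git merge-base --is-ancestor commit-2-3 commit-5-7 && echo reachable
    git merge-base --is-ancestor commit-4-9 commit-5-7 || echo not reachable
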
diff --git a/t/t7501-commit.sh b/t/t7501-commit.sh
index 4cae92804d..1a6773ee68 100755
--- a/t/t7501-commit.sh
+++ b/t/t7501-commit.sh
@@ -517,6 +517,22 @@ Myfooter: x" &&
test_cmp expected actual
'
+test_expect_success 'signoff not confused by ---' '
+ cat >expected <<-EOF &&
+ subject
+
+ body
+ ---
+ these dashes confuse the parser!
+
+ Signed-off-by: $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL>
+ EOF
+ # should be a noop, since we already signed
+ git commit --allow-empty --signoff -F expected &&
+ git log -1 --pretty=format:%B >actual &&
+ test_cmp expected actual
+'
+
test_expect_success 'multiple -m' '
>negative &&
diff --git a/t/t7513-interpret-trailers.sh b/t/t7513-interpret-trailers.sh
index 164719d1c9..c441861331 100755
--- a/t/t7513-interpret-trailers.sh
+++ b/t/t7513-interpret-trailers.sh
@@ -1417,4 +1417,46 @@ test_expect_success 'unfold' '
test_cmp expected actual
'
+test_expect_success 'handling of --- lines in input' '
+ echo "real-trailer: just right" >expected &&
+
+ git interpret-trailers --parse >actual <<-\EOF &&
+ subject
+
+ body
+
+ not-a-trailer: too soon
+ ------ this is just a line in the commit message with a bunch of
+ ------ dashes; it does not have any syntactic meaning.
+
+ real-trailer: just right
+ ---
+ below the dashed line may be a patch, etc.
+
+ not-a-trailer: too late
+ EOF
+
+ test_cmp expected actual
+'
+
+test_expect_success 'suppress --- handling' '
+ echo "real-trailer: just right" >expected &&
+
+ git interpret-trailers --parse --no-divider >actual <<-\EOF &&
+ subject
+
+ This commit message has a "---" in it, but because we tell
+ interpret-trailers not to respect that, it has no effect.
+
+ not-a-trailer: too soon
+ ---
+
+ This is still the commit message body.
+
+ real-trailer: just right
+ EOF
+
+ test_cmp expected actual
+'
+
test_done
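
From the command line, --no-divider is the switch that tells interpret-trailers the input is a plain commit message, so a "---" line is just text rather than the start of a patch; without it the example below would stop looking for trailers at the dashes and print nothing:

    printf 'subject\n\nbody\n---\nnot a patch\n\nAcked-by: A U Thor <author@example.com>\n' |
    git interpret-trailers --parse --no-divider
    # -> Acked-by: A U Thor <author@example.com>
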
diff --git a/t/test-lib-functions.sh b/t/test-lib-functions.sh
index 4207af4077..d82fac9d79 100644
--- a/t/test-lib-functions.sh
+++ b/t/test-lib-functions.sh
@@ -42,6 +42,8 @@ test_decode_color () {
function name(n) {
if (n == 0) return "RESET";
if (n == 1) return "BOLD";
+ if (n == 2) return "FAINT";
+ if (n == 3) return "ITALIC";
if (n == 7) return "REVERSE";
if (n == 30) return "BLACK";
if (n == 31) return "RED";
diff --git a/t/test-lib.sh b/t/test-lib.sh
index 44288cbb59..3f95bfda60 100644
--- a/t/test-lib.sh
+++ b/t/test-lib.sh
@@ -1083,6 +1083,12 @@ else
test_set_prereq C_LOCALE_OUTPUT
fi
+if test -z "$GIT_TEST_CHECK_CACHE_TREE"
+then
+ GIT_TEST_CHECK_CACHE_TREE=true
+ export GIT_TEST_CHECK_CACHE_TREE
+fi
+
test_lazy_prereq PIPE '
# test whether the filesystem supports FIFOs
test_have_prereq !MINGW,!CYGWIN &&
diff --git a/tempfile.c b/tempfile.c
index 139ecd97f8..d43ad8c191 100644
--- a/tempfile.c
+++ b/tempfile.c
@@ -279,7 +279,7 @@ int reopen_tempfile(struct tempfile *tempfile)
BUG("reopen_tempfile called for an inactive object");
if (0 <= tempfile->fd)
BUG("reopen_tempfile called for an open object");
- tempfile->fd = open(tempfile->filename.buf, O_WRONLY);
+ tempfile->fd = open(tempfile->filename.buf, O_WRONLY|O_TRUNC);
return tempfile->fd;
}
diff --git a/tempfile.h b/tempfile.h
index 36434eb6fa..61d8dc4d1b 100644
--- a/tempfile.h
+++ b/tempfile.h
@@ -236,8 +236,8 @@ extern int close_tempfile_gently(struct tempfile *tempfile);
* it (and nobody else) to inspect or even modify the file's
* contents.
*
- * * `reopen_tempfile()` to reopen the temporary file. Make further
- * updates to the contents.
+ * * `reopen_tempfile()` to reopen the temporary file, truncating the existing
+ * contents. Write out the new contents.
*
* * `rename_tempfile()` to move the file to its permanent location.
*/
diff --git a/trace.c b/trace.c
index fc623e91fd..fa4a2e7120 100644
--- a/trace.c
+++ b/trace.c
@@ -176,10 +176,30 @@ void trace_strbuf_fl(const char *file, int line, struct trace_key *key,
strbuf_release(&buf);
}
+static uint64_t perf_start_times[10];
+static int perf_indent;
+
+uint64_t trace_performance_enter(void)
+{
+ uint64_t now;
+
+ if (!trace_want(&trace_perf_key))
+ return 0;
+
+ now = getnanotime();
+ perf_start_times[perf_indent] = now;
+ if (perf_indent + 1 < ARRAY_SIZE(perf_start_times))
+ perf_indent++;
+ else
+ BUG("Too deep indentation");
+ return now;
+}
+
static void trace_performance_vprintf_fl(const char *file, int line,
uint64_t nanos, const char *format,
va_list ap)
{
+ static const char space[] = " ";
struct strbuf buf = STRBUF_INIT;
if (!prepare_trace_line(file, line, &trace_perf_key, &buf))
@@ -188,7 +208,10 @@ static void trace_performance_vprintf_fl(const char *file, int line,
strbuf_addf(&buf, "performance: %.9f s", (double) nanos / 1000000000);
if (format && *format) {
- strbuf_addstr(&buf, ": ");
+ if (perf_indent >= strlen(space))
+ BUG("Too deep indentation");
+
+ strbuf_addf(&buf, ":%.*s ", perf_indent, space);
strbuf_vaddf(&buf, format, ap);
}
@@ -244,6 +267,24 @@ void trace_performance_since(uint64_t start, const char *format, ...)
va_end(ap);
}
+void trace_performance_leave(const char *format, ...)
+{
+ va_list ap;
+ uint64_t since;
+
+ if (perf_indent)
+ perf_indent--;
+
+ if (!format) /* Allow callers to leave without tracing anything */
+ return;
+
+ since = perf_start_times[perf_indent];
+ va_start(ap, format);
+ trace_performance_vprintf_fl(NULL, 0, getnanotime() - since,
+ format, ap);
+ va_end(ap);
+}
+
#else
void trace_printf_key_fl(const char *file, int line, struct trace_key *key,
@@ -273,6 +314,24 @@ void trace_performance_fl(const char *file, int line, uint64_t nanos,
va_end(ap);
}
+void trace_performance_leave_fl(const char *file, int line,
+ uint64_t nanos, const char *format, ...)
+{
+ va_list ap;
+ uint64_t since;
+
+ if (perf_indent)
+ perf_indent--;
+
+ if (!format) /* Allow callers to leave without tracing anything */
+ return;
+
+ since = perf_start_times[perf_indent];
+ va_start(ap, format);
+ trace_performance_vprintf_fl(file, line, nanos - since, format, ap);
+ va_end(ap);
+}
+
#endif /* HAVE_VARIADIC_MACROS */
@@ -411,13 +470,11 @@ uint64_t getnanotime(void)
}
}
-static uint64_t command_start_time;
static struct strbuf command_line = STRBUF_INIT;
static void print_command_performance_atexit(void)
{
- trace_performance_since(command_start_time, "git command:%s",
- command_line.buf);
+ trace_performance_leave("git command:%s", command_line.buf);
}
void trace_command_performance(const char **argv)
@@ -425,10 +482,10 @@ void trace_command_performance(const char **argv)
if (!trace_want(&trace_perf_key))
return;
- if (!command_start_time)
+ if (!command_line.len)
atexit(print_command_performance_atexit);
strbuf_reset(&command_line);
sq_quote_argv_pretty(&command_line, argv);
- command_start_time = getnanotime();
+ trace_performance_enter();
}
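
With the enter/leave pairs, nested regions show up indented under their parent in the performance trace, so the checkout-related regions added elsewhere in this series read roughly as below (branch name arbitrary, timings illustrative, other regions elided; output goes to stderr):

    GIT_TRACE_PERFORMANCE=2 git checkout -f master
    # performance: 0.000443204 s:   traverse_trees
    # performance: 0.001490750 s:  unpack_trees
    # performance: 0.004059867 s: git command: 'git' 'checkout' '-f' 'master'
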
diff --git a/trace.h b/trace.h
index 2b6a1bc17c..171b256d26 100644
--- a/trace.h
+++ b/trace.h
@@ -23,6 +23,7 @@ extern void trace_disable(struct trace_key *key);
extern uint64_t getnanotime(void);
extern void trace_command_performance(const char **argv);
extern void trace_verbatim(struct trace_key *key, const void *buf, unsigned len);
+uint64_t trace_performance_enter(void);
#ifndef HAVE_VARIADIC_MACROS
@@ -45,6 +46,9 @@ extern void trace_performance(uint64_t nanos, const char *format, ...);
__attribute__((format (printf, 2, 3)))
extern void trace_performance_since(uint64_t start, const char *format, ...);
+__attribute__((format (printf, 1, 2)))
+void trace_performance_leave(const char *format, ...);
+
#else
/*
@@ -118,6 +122,14 @@ extern void trace_performance_since(uint64_t start, const char *format, ...);
__VA_ARGS__); \
} while (0)
+#define trace_performance_leave(...) \
+ do { \
+ if (trace_pass_fl(&trace_perf_key)) \
+ trace_performance_leave_fl(TRACE_CONTEXT, __LINE__, \
+ getnanotime(), \
+ __VA_ARGS__); \
+ } while (0)
+
/* backend functions, use non-*fl macros instead */
__attribute__((format (printf, 4, 5)))
extern void trace_printf_key_fl(const char *file, int line, struct trace_key *key,
@@ -130,6 +142,9 @@ extern void trace_strbuf_fl(const char *file, int line, struct trace_key *key,
__attribute__((format (printf, 4, 5)))
extern void trace_performance_fl(const char *file, int line,
uint64_t nanos, const char *fmt, ...);
+__attribute__((format (printf, 4, 5)))
+extern void trace_performance_leave_fl(const char *file, int line,
+ uint64_t nanos, const char *fmt, ...);
static inline int trace_pass_fl(struct trace_key *key)
{
return key->fd || !key->initialized;
diff --git a/trailer.c b/trailer.c
index 4e309460d1..0796f326b3 100644
--- a/trailer.c
+++ b/trailer.c
@@ -585,7 +585,7 @@ static const char *token_from_item(struct arg_item *item, char *tok)
return item->conf.name;
}
-static int token_matches_item(const char *tok, struct arg_item *item, int tok_len)
+static int token_matches_item(const char *tok, struct arg_item *item, size_t tok_len)
{
if (!strncasecmp(tok, item->conf.name, tok_len))
return 1;
@@ -603,7 +603,7 @@ static int token_matches_item(const char *tok, struct arg_item *item, int tok_le
* distinguished from the non-well-formed-line case (in which this function
* returns -1) because some callers of this function need such a distinction.
*/
-static int find_separator(const char *line, const char *separators)
+static ssize_t find_separator(const char *line, const char *separators)
{
int whitespace_found = 0;
const char *c;
@@ -630,10 +630,10 @@ static int find_separator(const char *line, const char *separators)
*/
static void parse_trailer(struct strbuf *tok, struct strbuf *val,
const struct conf_info **conf, const char *trailer,
- int separator_pos)
+ ssize_t separator_pos)
{
struct arg_item *item;
- int tok_len;
+ size_t tok_len;
struct list_head *pos;
if (separator_pos != -1) {
@@ -721,7 +721,7 @@ static void process_command_line_args(struct list_head *arg_head,
list_for_each(pos, new_trailer_head) {
struct new_trailer_item *tr =
list_entry(pos, struct new_trailer_item, list);
- int separator_pos = find_separator(tr->text, cl_separators);
+ ssize_t separator_pos = find_separator(tr->text, cl_separators);
if (separator_pos == 0) {
struct strbuf sb = STRBUF_INIT;
@@ -763,9 +763,9 @@ static const char *next_line(const char *str)
/*
* Return the position of the start of the last line. If len is 0, return -1.
*/
-static int last_line(const char *buf, size_t len)
+static ssize_t last_line(const char *buf, size_t len)
{
- int i;
+ ssize_t i;
if (len == 0)
return -1;
if (len == 1)
@@ -788,12 +788,14 @@ static int last_line(const char *buf, size_t len)
* Return the position of the start of the patch or the length of str if there
* is no patch in the message.
*/
-static int find_patch_start(const char *str)
+static size_t find_patch_start(const char *str)
{
const char *s;
for (s = str; *s; s = next_line(s)) {
- if (starts_with(s, "---"))
+ const char *v;
+
+ if (skip_prefix(s, "---", &v) && isspace(*v))
return s - str;
}
@@ -804,10 +806,11 @@ static int find_patch_start(const char *str)
* Return the position of the first trailer line or len if there are no
* trailers.
*/
-static int find_trailer_start(const char *buf, size_t len)
+static size_t find_trailer_start(const char *buf, size_t len)
{
const char *s;
- int end_of_title, l, only_spaces = 1;
+ ssize_t end_of_title, l;
+ int only_spaces = 1;
int recognized_prefix = 0, trailer_lines = 0, non_trailer_lines = 0;
/*
* Number of possible continuation lines encountered. This will be
@@ -838,7 +841,7 @@ static int find_trailer_start(const char *buf, size_t len)
l = last_line(buf, l)) {
const char *bol = buf + l;
const char **p;
- int separator_pos;
+ ssize_t separator_pos;
if (bol[0] == comment_line_char) {
non_trailer_lines += possible_continuation_lines;
@@ -899,14 +902,14 @@ continue_outer_loop:
}
/* Return the position of the end of the trailers. */
-static int find_trailer_end(const char *buf, size_t len)
+static size_t find_trailer_end(const char *buf, size_t len)
{
return len - ignore_non_trailer(buf, len);
}
static int ends_with_blank_line(const char *buf, size_t len)
{
- int ll = last_line(buf, len);
+ ssize_t ll = last_line(buf, len);
if (ll < 0)
return 0;
return is_blank_line(buf + ll);
@@ -939,17 +942,17 @@ static void unfold_value(struct strbuf *val)
strbuf_release(&out);
}
-static int process_input_file(FILE *outfile,
- const char *str,
- struct list_head *head,
- const struct process_trailer_options *opts)
+static size_t process_input_file(FILE *outfile,
+ const char *str,
+ struct list_head *head,
+ const struct process_trailer_options *opts)
{
struct trailer_info info;
struct strbuf tok = STRBUF_INIT;
struct strbuf val = STRBUF_INIT;
- int i;
+ size_t i;
- trailer_info_get(&info, str);
+ trailer_info_get(&info, str, opts);
/* Print lines before the trailers as is */
if (!opts->only_trailers)
@@ -1032,7 +1035,7 @@ void process_trailers(const char *file,
{
LIST_HEAD(head);
struct strbuf sb = STRBUF_INIT;
- int trailer_end;
+ size_t trailer_end;
FILE *outfile = stdout;
ensure_configured();
@@ -1066,7 +1069,8 @@ void process_trailers(const char *file,
strbuf_release(&sb);
}
-void trailer_info_get(struct trailer_info *info, const char *str)
+void trailer_info_get(struct trailer_info *info, const char *str,
+ const struct process_trailer_options *opts)
{
int patch_start, trailer_end, trailer_start;
struct strbuf **trailer_lines, **ptr;
@@ -1076,7 +1080,11 @@ void trailer_info_get(struct trailer_info *info, const char *str)
ensure_configured();
- patch_start = find_patch_start(str);
+ if (opts->no_divider)
+ patch_start = strlen(str);
+ else
+ patch_start = find_patch_start(str);
+
trailer_end = find_trailer_end(str, patch_start);
trailer_start = find_trailer_start(str, trailer_end);
@@ -1111,7 +1119,7 @@ void trailer_info_get(struct trailer_info *info, const char *str)
void trailer_info_release(struct trailer_info *info)
{
- int i;
+ size_t i;
for (i = 0; i < info->trailer_nr; i++)
free(info->trailers[i]);
free(info->trailers);
@@ -1121,7 +1129,7 @@ static void format_trailer_info(struct strbuf *out,
const struct trailer_info *info,
const struct process_trailer_options *opts)
{
- int i;
+ size_t i;
/* If we want the whole block untouched, we can take the fast path. */
if (!opts->only_trailers && !opts->unfold) {
@@ -1132,7 +1140,7 @@ static void format_trailer_info(struct strbuf *out,
for (i = 0; i < info->trailer_nr; i++) {
char *trailer = info->trailers[i];
- int separator_pos = find_separator(trailer, separators);
+ ssize_t separator_pos = find_separator(trailer, separators);
if (separator_pos >= 1) {
struct strbuf tok = STRBUF_INIT;
@@ -1158,7 +1166,7 @@ void format_trailers_from_commit(struct strbuf *out, const char *msg,
{
struct trailer_info info;
- trailer_info_get(&info, msg);
+ trailer_info_get(&info, msg, opts);
format_trailer_info(out, &info, opts);
trailer_info_release(&info);
}
diff --git a/trailer.h b/trailer.h
index 9c10026c35..b997739649 100644
--- a/trailer.h
+++ b/trailer.h
@@ -71,6 +71,7 @@ struct process_trailer_options {
int only_trailers;
int only_input;
int unfold;
+ int no_divider;
};
#define PROCESS_TRAILER_OPTIONS_INIT {0}
@@ -79,7 +80,8 @@ void process_trailers(const char *file,
const struct process_trailer_options *opts,
struct list_head *new_trailer_head);
-void trailer_info_get(struct trailer_info *info, const char *str);
+void trailer_info_get(struct trailer_info *info, const char *str,
+ const struct process_trailer_options *opts);
void trailer_info_release(struct trailer_info *info);
diff --git a/transport.c b/transport.c
index 06ffea2774..1c76d64aba 100644
--- a/transport.c
+++ b/transport.c
@@ -1228,7 +1228,7 @@ int transport_fetch_refs(struct transport *transport, struct ref *refs)
nr_refs++;
if (rm->peer_ref &&
!is_null_oid(&rm->old_oid) &&
- !oidcmp(&rm->peer_ref->old_oid, &rm->old_oid))
+ oideq(&rm->peer_ref->old_oid, &rm->old_oid))
continue;
ALLOC_GROW(heads, nr_heads + 1, nr_alloc);
heads[nr_heads++] = rm;
diff --git a/tree-diff.c b/tree-diff.c
index 553bc0e63a..425668e1e0 100644
--- a/tree-diff.c
+++ b/tree-diff.c
@@ -491,7 +491,7 @@ static struct combine_diff_path *ll_diff_tree_paths(
continue;
 /* diff(t,pi) != ø */
- if (oidcmp(t.entry.oid, tp[i].entry.oid) ||
+ if (!oideq(t.entry.oid, tp[i].entry.oid) ||
(t.entry.mode != tp[i].entry.mode))
continue;
diff --git a/unpack-trees.c b/unpack-trees.c
index 82a83dbc67..51bfac6aa0 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -336,6 +336,46 @@ static struct progress *get_progress(struct unpack_trees_options *o)
return start_delayed_progress(_("Checking out files"), total);
}
+static void setup_collided_checkout_detection(struct checkout *state,
+ struct index_state *index)
+{
+ int i;
+
+ state->clone = 1;
+ for (i = 0; i < index->cache_nr; i++)
+ index->cache[i]->ce_flags &= ~CE_MATCHED;
+}
+
+static void report_collided_checkout(struct index_state *index)
+{
+ struct string_list list = STRING_LIST_INIT_NODUP;
+ int i;
+
+ for (i = 0; i < index->cache_nr; i++) {
+ struct cache_entry *ce = index->cache[i];
+
+ if (!(ce->ce_flags & CE_MATCHED))
+ continue;
+
+ string_list_append(&list, ce->name);
+ ce->ce_flags &= ~CE_MATCHED;
+ }
+
+ list.cmp = fspathcmp;
+ string_list_sort(&list);
+
+ if (list.nr) {
+ warning(_("the following paths have collided (e.g. case-sensitive paths\n"
+ "on a case-insensitive filesystem) and only one from the same\n"
+ "colliding group is in the working tree:\n"));
+
+ for (i = 0; i < list.nr; i++)
+ fprintf(stderr, " '%s'\n", list.items[i].string);
+ }
+
+ string_list_clear(&list, 0);
+}
+
static int check_updates(struct unpack_trees_options *o)
{
unsigned cnt = 0;
@@ -345,11 +385,15 @@ static int check_updates(struct unpack_trees_options *o)
struct checkout state = CHECKOUT_INIT;
int i;
+ trace_performance_enter();
state.force = 1;
state.quiet = 1;
state.refresh_cache = 1;
state.istate = index;
+ if (o->clone)
+ setup_collided_checkout_detection(&state, index);
+
progress = get_progress(o);
if (o->update)
@@ -414,6 +458,11 @@ static int check_updates(struct unpack_trees_options *o)
errs |= finish_delayed_checkout(&state);
if (o->update)
git_attr_set_direction(GIT_ATTR_CHECKIN);
+
+ if (o->clone)
+ report_collided_checkout(index);
+
+ trace_performance_leave("check_updates");
return errs != 0;
}
@@ -630,7 +679,114 @@ static int switch_cache_bottom(struct traverse_info *info)
static inline int are_same_oid(struct name_entry *name_j, struct name_entry *name_k)
{
- return name_j->oid && name_k->oid && !oidcmp(name_j->oid, name_k->oid);
+ return name_j->oid && name_k->oid && oideq(name_j->oid, name_k->oid);
+}
+
+static int all_trees_same_as_cache_tree(int n, unsigned long dirmask,
+ struct name_entry *names,
+ struct traverse_info *info)
+{
+ struct unpack_trees_options *o = info->data;
+ int i;
+
+ if (!o->merge || dirmask != ((1 << n) - 1))
+ return 0;
+
+ for (i = 1; i < n; i++)
+ if (!are_same_oid(names, names + i))
+ return 0;
+
+ return cache_tree_matches_traversal(o->src_index->cache_tree, names, info);
+}
+
+static int index_pos_by_traverse_info(struct name_entry *names,
+ struct traverse_info *info)
+{
+ struct unpack_trees_options *o = info->data;
+ int len = traverse_path_len(info, names);
+ char *name = xmalloc(len + 1 /* slash */ + 1 /* NUL */);
+ int pos;
+
+ make_traverse_path(name, info, names);
+ name[len++] = '/';
+ name[len] = '\0';
+ pos = index_name_pos(o->src_index, name, len);
+ if (pos >= 0)
+ BUG("This is a directory and should not exist in index");
+ pos = -pos - 1;
+ if (!starts_with(o->src_index->cache[pos]->name, name) ||
+ (pos > 0 && starts_with(o->src_index->cache[pos-1]->name, name)))
+ BUG("pos must point at the first entry in this directory");
+ free(name);
+ return pos;
+}
+
+/*
+ * Fast path if we detect that all trees are the same as cache-tree at this
+ * path. We'll walk these trees in an iterative loop using cache-tree/index
+ * instead of ODB since we already know what these trees contain.
+ */
+static int traverse_by_cache_tree(int pos, int nr_entries, int nr_names,
+ struct name_entry *names,
+ struct traverse_info *info)
+{
+ struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
+ struct unpack_trees_options *o = info->data;
+ struct cache_entry *tree_ce = NULL;
+ int ce_len = 0;
+ int i, d;
+
+ if (!o->merge)
+ BUG("We need cache-tree to do this optimization");
+
+ /*
+ * Do what unpack_callback() and unpack_nondirectories() normally
+ * do. But we walk all paths in an iterative loop instead.
+ *
+ * D/F conflicts and higher stage entries are not a concern
+ * because cache-tree would be invalidated and we would never
+ * get here in the first place.
+ */
+ for (i = 0; i < nr_entries; i++) {
+ int new_ce_len, len, rc;
+
+ src[0] = o->src_index->cache[pos + i];
+
+ len = ce_namelen(src[0]);
+ new_ce_len = cache_entry_size(len);
+
+ if (new_ce_len > ce_len) {
+ new_ce_len <<= 1;
+ tree_ce = xrealloc(tree_ce, new_ce_len);
+ memset(tree_ce, 0, new_ce_len);
+ ce_len = new_ce_len;
+
+ tree_ce->ce_flags = create_ce_flags(0);
+
+ for (d = 1; d <= nr_names; d++)
+ src[d] = tree_ce;
+ }
+
+ tree_ce->ce_mode = src[0]->ce_mode;
+ tree_ce->ce_namelen = len;
+ oidcpy(&tree_ce->oid, &src[0]->oid);
+ memcpy(tree_ce->name, src[0]->name, len + 1);
+
+ rc = call_unpack_fn((const struct cache_entry * const *)src, o);
+ if (rc < 0) {
+ free(tree_ce);
+ return rc;
+ }
+
+ mark_ce_used(src[0], o);
+ }
+ free(tree_ce);
+ if (o->debug_unpack)
+ printf("Unpacked %d entries from %s to %s using cache-tree\n",
+ nr_entries,
+ o->src_index->cache[pos]->name,
+ o->src_index->cache[pos + nr_entries - 1]->name);
+ return 0;
}
static int traverse_trees_recursive(int n, unsigned long dirmask,
@@ -644,6 +800,27 @@ static int traverse_trees_recursive(int n, unsigned long dirmask,
void *buf[MAX_UNPACK_TREES];
struct traverse_info newinfo;
struct name_entry *p;
+ int nr_entries;
+
+ nr_entries = all_trees_same_as_cache_tree(n, dirmask, names, info);
+ if (nr_entries > 0) {
+ struct unpack_trees_options *o = info->data;
+ int pos = index_pos_by_traverse_info(names, info);
+
+ if (!o->merge || df_conflicts)
+ BUG("Wrong condition to get here buddy");
+
+ /*
+ * All entries up to 'pos' must have been processed
+ * (i.e. marked CE_UNPACKED) at this point. But to be safe,
+ * save and restore cache_bottom anyway to not miss
+ * unprocessed entries before 'pos'.
+ */
+ bottom = o->cache_bottom;
+ ret = traverse_by_cache_tree(pos, nr_entries, n, names, info);
+ o->cache_bottom = bottom;
+ return ret;
+ }
p = names;
while (!p->mode)
@@ -810,6 +987,11 @@ static struct cache_entry *create_ce_entry(const struct traverse_info *info,
return ce;
}
+/*
+ * Note that traverse_by_cache_tree() duplicates some logic in this function
+ * without actually calling it. If you change the logic here you may need to
+ * check and change there as well.
+ */
static int unpack_nondirectories(int n, unsigned long mask,
unsigned long dirmask,
struct cache_entry **src,
@@ -1002,6 +1184,11 @@ static void debug_unpack_callback(int n,
debug_name_entry(i, names + i);
}
+/*
+ * Note that traverse_by_cache_tree() duplicates some logic in this function
+ * without actually calling it. If you change the logic here you may need to
+ * check and change there as well.
+ */
static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
{
struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
@@ -1289,6 +1476,7 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
if (len > MAX_UNPACK_TREES)
die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);
+ trace_performance_enter();
memset(&el, 0, sizeof(el));
if (!core_apply_sparse_checkout || !o->update)
o->skip_sparse_checkout = 1;
@@ -1361,7 +1549,10 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
}
}
- if (traverse_trees(len, t, &info) < 0)
+ trace_performance_enter();
+ ret = traverse_trees(len, t, &info);
+ trace_performance_leave("traverse_trees");
+ if (ret < 0)
goto return_failed;
}
@@ -1436,7 +1627,10 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
ret = check_updates(o) ? (-2) : 0;
if (o->dst_index) {
+ move_index_extensions(&o->result, o->src_index);
if (!ret) {
+ if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
+ cache_tree_verify(&o->result);
if (!o->result.cache_tree)
o->result.cache_tree = cache_tree();
if (!cache_tree_fully_valid(o->result.cache_tree))
@@ -1444,7 +1638,6 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
WRITE_TREE_SILENT |
WRITE_TREE_REPAIR);
}
- move_index_extensions(&o->result, o->src_index);
discard_index(o->dst_index);
*o->dst_index = o->result;
} else {
@@ -1453,6 +1646,7 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
o->src_index = NULL;
done:
+ trace_performance_leave("unpack_trees");
clear_exclude_list(&el);
return ret;
@@ -1484,7 +1678,7 @@ static int same(const struct cache_entry *a, const struct cache_entry *b)
if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED)
return 0;
return a->ce_mode == b->ce_mode &&
- !oidcmp(&a->oid, &b->oid);
+ oideq(&a->oid, &b->oid);
}
@@ -1616,7 +1810,7 @@ static int verify_clean_subdirectory(const struct cache_entry *ce,
* If we are not going to update the submodule, then
* we don't care.
*/
- if (!sub_head && !oidcmp(&oid, &ce->oid))
+ if (!sub_head && oideq(&oid, &ce->oid))
return 0;
return verify_clean_submodule(sub_head ? NULL : oid_to_hex(&oid),
ce, error_type, o);
@@ -1644,6 +1838,7 @@ static int verify_clean_subdirectory(const struct cache_entry *ce,
if (verify_uptodate(ce2, o))
return -1;
add_entry(o, ce2, CE_REMOVE, 0);
+ invalidate_ce_path(ce, o);
mark_ce_used(ce2, o);
}
cnt++;
@@ -1903,6 +2098,8 @@ static int keep_entry(const struct cache_entry *ce,
struct unpack_trees_options *o)
{
add_entry(o, ce, 0, 0);
+ if (ce_stage(ce))
+ invalidate_ce_path(ce, o);
return 1;
}
diff --git a/unpack-trees.h b/unpack-trees.h
index 847f217dba..0135080a7b 100644
--- a/unpack-trees.h
+++ b/unpack-trees.h
@@ -45,6 +45,7 @@ struct unpack_trees_options {
unsigned int reset,
merge,
update,
+ clone,
index_only,
nontrivial_merge,
trivial_merges_only,
diff --git a/upload-pack.c b/upload-pack.c
index 82b393ec31..62a1000f44 100644
--- a/upload-pack.c
+++ b/upload-pack.c
@@ -24,13 +24,13 @@
#include "quote.h"
#include "upload-pack.h"
#include "serve.h"
+#include "commit-reach.h"
/* Remember to update object flag allocation in object.h */
#define THEY_HAVE (1u << 11)
#define OUR_REF (1u << 12)
#define WANTED (1u << 13)
#define COMMON_KNOWN (1u << 14)
-#define REACHABLE (1u << 15)
#define SHALLOW (1u << 16)
#define NOT_SHALLOW (1u << 17)
@@ -337,64 +337,16 @@ static int got_oid(const char *hex, struct object_id *oid)
return 0;
}
-static int reachable(struct commit *want)
-{
- struct prio_queue work = { compare_commits_by_commit_date };
-
- prio_queue_put(&work, want);
- while (work.nr) {
- struct commit_list *list;
- struct commit *commit = prio_queue_get(&work);
-
- if (commit->object.flags & THEY_HAVE) {
- want->object.flags |= COMMON_KNOWN;
- break;
- }
- if (!commit->object.parsed)
- parse_object(the_repository, &commit->object.oid);
- if (commit->object.flags & REACHABLE)
- continue;
- commit->object.flags |= REACHABLE;
- if (commit->date < oldest_have)
- continue;
- for (list = commit->parents; list; list = list->next) {
- struct commit *parent = list->item;
- if (!(parent->object.flags & REACHABLE))
- prio_queue_put(&work, parent);
- }
- }
- want->object.flags |= REACHABLE;
- clear_commit_marks(want, REACHABLE);
- clear_prio_queue(&work);
- return (want->object.flags & COMMON_KNOWN);
-}
-
static int ok_to_give_up(void)
{
- int i;
+ uint32_t min_generation = GENERATION_NUMBER_ZERO;
if (!have_obj.nr)
return 0;
- for (i = 0; i < want_obj.nr; i++) {
- struct object *want = want_obj.objects[i].item;
-
- if (want->flags & COMMON_KNOWN)
- continue;
- want = deref_tag(the_repository, want, "a want line", 0);
- if (!want || want->type != OBJ_COMMIT) {
- /* no way to tell if this is reachable by
- * looking at the ancestry chain alone, so
- * leave a note to ourselves not to worry about
- * this object anymore.
- */
- want_obj.objects[i].item->flags |= COMMON_KNOWN;
- continue;
- }
- if (!reachable((struct commit *)want))
- return 0;
- }
- return 1;
+ return can_all_from_reach_with_flag(&want_obj, THEY_HAVE,
+ COMMON_KNOWN, oldest_have,
+ min_generation);
}
static int get_common_commits(void)
diff --git a/userdiff.c b/userdiff.c
index f3f4be579c..f565f6731d 100644
--- a/userdiff.c
+++ b/userdiff.c
@@ -278,8 +278,7 @@ struct userdiff_driver *userdiff_find_by_path(const char *path)
check = attr_check_initl("diff", NULL);
if (!path)
return NULL;
- if (git_check_attr(&the_index, path, check))
- return NULL;
+ git_check_attr(&the_index, path, check);
if (ATTR_TRUE(check->items[0].value))
return &driver_true;
diff --git a/worktree.c b/worktree.c
index 97cda5f97b..b0d0b5426d 100644
--- a/worktree.c
+++ b/worktree.c
@@ -217,7 +217,11 @@ struct worktree *find_worktree(struct worktree **list,
if (prefix)
arg = to_free = prefix_filename(prefix, arg);
- path = real_pathdup(arg, 1);
+ path = real_pathdup(arg, 0);
+ if (!path) {
+ free(to_free);
+ return NULL;
+ }
for (; *list; list++)
if (!fspathcmp(path, real_path((*list)->path)))
break;
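
find_worktree() switches to the non-dying form of real_pathdup() and now returns NULL (after freeing the prefixed copy of the argument) when the path cannot be resolved, rather than aborting the whole command. A standalone sketch of the same error-handling shape built on POSIX realpath(3); resolve_or_null and its prefix handling are illustrative, not Git's helpers.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Resolve 'arg' (optionally prefixed) to a canonical path, or return
     * NULL on failure instead of exiting -- mirroring the gentle lookup. */
    static char *resolve_or_null(const char *prefix, const char *arg)
    {
        char *to_free = NULL;
        char *path;

        if (prefix) {
            size_t n = strlen(prefix) + strlen(arg) + 2;
            to_free = malloc(n);
            if (!to_free)
                return NULL;
            snprintf(to_free, n, "%s/%s", prefix, arg);
            arg = to_free;
        }

        path = realpath(arg, NULL);  /* NULL when the path cannot be resolved */
        if (!path) {
            free(to_free);           /* clean up before reporting failure */
            return NULL;
        }
        free(to_free);
        return path;
    }

    int main(void)
    {
        char *p = resolve_or_null(NULL, "definitely/not/a/real/path");
        printf("%s\n", p ? p : "(lookup failed, no die())");
        free(p);
        return 0;
    }
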
diff --git a/ws.c b/ws.c
index 5b67b426e7..a64ab51e09 100644
--- a/ws.c
+++ b/ws.c
@@ -74,35 +74,31 @@ unsigned parse_whitespace_rule(const char *string)
unsigned whitespace_rule(const char *pathname)
{
static struct attr_check *attr_whitespace_rule;
+ const char *value;
if (!attr_whitespace_rule)
attr_whitespace_rule = attr_check_initl("whitespace", NULL);
- if (!git_check_attr(&the_index, pathname, attr_whitespace_rule)) {
- const char *value;
-
- value = attr_whitespace_rule->items[0].value;
- if (ATTR_TRUE(value)) {
- /* true (whitespace) */
- unsigned all_rule = ws_tab_width(whitespace_rule_cfg);
- int i;
- for (i = 0; i < ARRAY_SIZE(whitespace_rule_names); i++)
- if (!whitespace_rule_names[i].loosens_error &&
- !whitespace_rule_names[i].exclude_default)
- all_rule |= whitespace_rule_names[i].rule_bits;
- return all_rule;
- } else if (ATTR_FALSE(value)) {
- /* false (-whitespace) */
- return ws_tab_width(whitespace_rule_cfg);
- } else if (ATTR_UNSET(value)) {
- /* reset to default (!whitespace) */
- return whitespace_rule_cfg;
- } else {
- /* string */
- return parse_whitespace_rule(value);
- }
- } else {
+ git_check_attr(&the_index, pathname, attr_whitespace_rule);
+ value = attr_whitespace_rule->items[0].value;
+ if (ATTR_TRUE(value)) {
+ /* true (whitespace) */
+ unsigned all_rule = ws_tab_width(whitespace_rule_cfg);
+ int i;
+ for (i = 0; i < ARRAY_SIZE(whitespace_rule_names); i++)
+ if (!whitespace_rule_names[i].loosens_error &&
+ !whitespace_rule_names[i].exclude_default)
+ all_rule |= whitespace_rule_names[i].rule_bits;
+ return all_rule;
+ } else if (ATTR_FALSE(value)) {
+ /* false (-whitespace) */
+ return ws_tab_width(whitespace_rule_cfg);
+ } else if (ATTR_UNSET(value)) {
+ /* reset to default (!whitespace) */
return whitespace_rule_cfg;
+ } else {
+ /* string */
+ return parse_whitespace_rule(value);
}
}
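
With git_check_attr() no longer reporting failure, whitespace_rule() collapses into one four-way dispatch on the attribute's state: set, explicitly unset, left untouched, or carrying a string value. A standalone sketch of that dispatch; the enum, the bit values, and the stubbed string parsing are illustrative stand-ins for Git's ATTR_TRUE/ATTR_FALSE/ATTR_UNSET sentinels and parse_whitespace_rule().

    #include <stdio.h>
    #include <string.h>

    /* Illustrative attribute state; Git uses sentinel pointers instead. */
    enum attr_state { ATTR_IS_TRUE, ATTR_IS_FALSE, ATTR_IS_UNSET, ATTR_IS_STRING };

    static unsigned rule_for(enum attr_state state, const char *value,
                             unsigned config_rule)
    {
        switch (state) {
        case ATTR_IS_TRUE:    /* "whitespace": turn on every tightening rule */
            return config_rule | 0xf0;
        case ATTR_IS_FALSE:   /* "-whitespace": keep only the tab width */
            return config_rule & 0x0f;
        case ATTR_IS_UNSET:   /* "!whitespace": reset to the config default */
            return config_rule;
        default:              /* "whitespace=<rules>": parse the string */
            return strcmp(value, "trailing-space") ? config_rule : 0x10;
        }
    }

    int main(void)
    {
        unsigned cfg = 0x08;  /* pretend tab width 8 came from config */

        printf("true   -> %#x\n", rule_for(ATTR_IS_TRUE, NULL, cfg));
        printf("false  -> %#x\n", rule_for(ATTR_IS_FALSE, NULL, cfg));
        printf("unset  -> %#x\n", rule_for(ATTR_IS_UNSET, NULL, cfg));
        printf("string -> %#x\n", rule_for(ATTR_IS_STRING, "trailing-space", cfg));
        return 0;
    }
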
diff --git a/wt-status.c b/wt-status.c
index 5ffab61015..1c8746d0ea 100644
--- a/wt-status.c
+++ b/wt-status.c
@@ -453,8 +453,8 @@ static void wt_status_collect_changed_cb(struct diff_queue_struct *q,
d->worktree_status = p->status;
if (S_ISGITLINK(p->two->mode)) {
d->dirty_submodule = p->two->dirty_submodule;
- d->new_submodule_commits = !!oidcmp(&p->one->oid,
- &p->two->oid);
+ d->new_submodule_commits = !oideq(&p->one->oid,
+ &p->two->oid);
if (s->status_format == STATUS_FORMAT_SHORT)
d->worktree_status = short_submodule_status(d);
}
@@ -1487,10 +1487,10 @@ static void wt_status_get_detached_from(struct wt_status_state *state)
if (dwim_ref(cb.buf.buf, cb.buf.len, &oid, &ref) == 1 &&
/* sha1 is a commit? match without further lookup */
- (!oidcmp(&cb.noid, &oid) ||
+ (oideq(&cb.noid, &oid) ||
/* perhaps sha1 is a tag, try to dereference to a commit */
((commit = lookup_commit_reference_gently(the_repository, &oid, 1)) != NULL &&
- !oidcmp(&cb.noid, &commit->object.oid)))) {
+ oideq(&cb.noid, &commit->object.oid)))) {
const char *from = ref;
if (!skip_prefix(from, "refs/tags/", &from))
skip_prefix(from, "refs/remotes/", &from);
@@ -1500,7 +1500,7 @@ static void wt_status_get_detached_from(struct wt_status_state *state)
xstrdup(find_unique_abbrev(&cb.noid, DEFAULT_ABBREV));
oidcpy(&state->detached_oid, &cb.noid);
state->detached_at = !get_oid("HEAD", &oid) &&
- !oidcmp(&oid, &state->detached_oid);
+ oideq(&oid, &state->detached_oid);
free(ref);
strbuf_release(&cb.buf);
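
The wt-status.c hunks above, and the xdiff-interface.c hunk that follows, are part of the tree-wide conversion from negated oidcmp() calls to the dedicated oideq() helper: `!oidcmp(a, b)` becomes `oideq(a, b)` and `!!oidcmp(a, b)` becomes `!oideq(a, b)`. A standalone sketch of the pattern over a toy fixed-size hash; oid_cmp/oid_eq and their memcmp-based bodies are illustrative, not Git's implementation.

    #include <stdio.h>
    #include <string.h>

    #define HASH_SZ 20

    /* Toy object id: a fixed-size raw hash, as in Git's struct object_id. */
    struct oid { unsigned char hash[HASH_SZ]; };

    /* Three-way comparison, useful for sorting. */
    static int oid_cmp(const struct oid *a, const struct oid *b)
    {
        return memcmp(a->hash, b->hash, HASH_SZ);
    }

    /* Dedicated equality check: callers say what they mean, and an
     * "are these equal" helper is free to take a cheaper code path. */
    static int oid_eq(const struct oid *a, const struct oid *b)
    {
        return !memcmp(a->hash, b->hash, HASH_SZ);
    }

    int main(void)
    {
        struct oid a = { { 0 } }, b = { { 0 } };
        b.hash[0] = 1;

        /* Old style: "did these change?" spelled as a negated compare. */
        int changed_old = !!oid_cmp(&a, &b);
        /* New style: the same predicate via the equality helper. */
        int changed_new = !oid_eq(&a, &b);

        printf("%d %d\n", changed_old, changed_new);
        return 0;
    }
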
diff --git a/xdiff-interface.c b/xdiff-interface.c
index ec6e574e4a..e7af95db86 100644
--- a/xdiff-interface.c
+++ b/xdiff-interface.c
@@ -186,7 +186,7 @@ void read_mmblob(mmfile_t *ptr, const struct object_id *oid)
unsigned long size;
enum object_type type;
- if (!oidcmp(oid, &null_oid)) {
+ if (oideq(oid, &null_oid)) {
ptr->ptr = xstrdup("");
ptr->size = 0;
return;