Diffstat (limited to 't/perf')
 -rw-r--r--  t/perf/README                        25
 -rwxr-xr-x  t/perf/aggregate.perl                69
 -rwxr-xr-x  t/perf/p0001-rev-list.sh             18
 -rwxr-xr-x  t/perf/p1450-fsck.sh                 13
 -rwxr-xr-x  t/perf/p1451-fsck-skip-list.sh       40
 -rwxr-xr-x  t/perf/p3400-rebase.sh               10
 -rwxr-xr-x  t/perf/p5302-pack-index.sh           31
 -rwxr-xr-x  t/perf/p5304-prune.sh                35
 -rwxr-xr-x  t/perf/p5310-pack-bitmaps.sh          3
 -rwxr-xr-x  t/perf/p5311-pack-bitmaps-fetch.sh   44
 -rwxr-xr-x  t/perf/p5600-partial-clone.sh        26
 -rw-r--r--  t/perf/perf-lib.sh                  112
 12 files changed, 341 insertions(+), 85 deletions(-)
diff --git a/t/perf/README b/t/perf/README
index 21321a0f36..be12090c38 100644
--- a/t/perf/README
+++ b/t/perf/README
@@ -168,3 +168,28 @@ that
While we have tried to make sure that it can cope with embedded
whitespace and other special characters, it will not work with
multi-line data.
+
+Rather than tracking the performance by run-time as `test_perf` does, you
+may also track output size by using `test_size`. The stdout of the
+function should be a single numeric value, which will be captured and
+shown in the aggregated output. For example:
+
+ test_perf 'time foo' '
+ ./foo >foo.out
+ '
+
+ test_size 'output size' '
+ wc -c <foo.out
+ '
+
+might produce output like:
+
+ Test origin HEAD
+ -------------------------------------------------------------
+ 1234.1 time foo 0.37(0.79+0.02) 0.26(0.51+0.02) -29.7%
+ 1234.2 output size 4.3M 3.6M -14.7%
+
+The item being measured (and its units) is up to the test; the context
+and the test title should make it clear to the user whether bigger or
+smaller numbers are better. Unlike test_perf, the test code will only be
+run once, since output sizes tend to be more deterministic than timings.
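
[Editorial sketch, not part of the patch: for orientation, a complete perf script exercising both helpers might look roughly like the following; the description, repository choice, and measured commands are illustrative only.]

	#!/bin/sh
	# illustrative sketch: record both a timing and a size for one operation
	test_description='illustrative repack timing and pack size'
	. ./perf-lib.sh

	test_perf_default_repo

	test_perf 'repack' '
		git repack -ad
	'

	test_size 'pack directory size (KB)' '
		# assumes the repack above left everything in .git/objects/pack
		du -k .git/objects/pack | cut -f1
	'

	test_done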
diff --git a/t/perf/aggregate.perl b/t/perf/aggregate.perl
index bc865160e7..494907a892 100755
--- a/t/perf/aggregate.perl
+++ b/t/perf/aggregate.perl
@@ -13,27 +13,42 @@ sub get_times {
my $line = <$fh>;
return undef if not defined $line;
close $fh or die "cannot close $name: $!";
- $line =~ /^(?:(\d+):)?(\d+):(\d+(?:\.\d+)?) (\d+(?:\.\d+)?) (\d+(?:\.\d+)?)$/
- or die "bad input line: $line";
- my $rt = ((defined $1 ? $1 : 0.0)*60+$2)*60+$3;
- return ($rt, $4, $5);
+ # times
+ if ($line =~ /^(?:(\d+):)?(\d+):(\d+(?:\.\d+)?) (\d+(?:\.\d+)?) (\d+(?:\.\d+)?)$/) {
+ my $rt = ((defined $1 ? $1 : 0.0)*60+$2)*60+$3;
+ return ($rt, $4, $5);
+ # size
+ } elsif ($line =~ /^\d+$/) {
+ return $&;
+ } else {
+ die "bad input line: $line";
+ }
+}
+
+sub relative_change {
+ my ($r, $firstr) = @_;
+ if ($firstr > 0) {
+ return sprintf "%+.1f%%", 100.0*($r-$firstr)/$firstr;
+ } elsif ($r == 0) {
+ return "=";
+ } else {
+ return "+inf";
+ }
}
sub format_times {
my ($r, $u, $s, $firstr) = @_;
+ # no value means we did not finish the test
if (!defined $r) {
return "<missing>";
}
- my $out = sprintf "%.2f(%.2f+%.2f)", $r, $u, $s;
- if (defined $firstr) {
- if ($firstr > 0) {
- $out .= sprintf " %+.1f%%", 100.0*($r-$firstr)/$firstr;
- } elsif ($r == 0) {
- $out .= " =";
- } else {
- $out .= " +inf";
- }
+ # a single value means we have a size, not times
+ if (!defined $u) {
+ return format_size($r, $firstr);
}
+ # otherwise, we have real/user/system times
+ my $out = sprintf "%.2f(%.2f+%.2f)", $r, $u, $s;
+ $out .= ' ' . relative_change($r, $firstr) if defined $firstr;
return $out;
}
@@ -51,6 +66,25 @@ EOT
exit(1);
}
+sub human_size {
+ my $n = shift;
+ my @units = ('', qw(K M G));
+ while ($n > 900 && @units > 1) {
+ $n /= 1000;
+ shift @units;
+ }
+ return $n unless length $units[0];
+ return sprintf '%.1f%s', $n, $units[0];
+}
+
+sub format_size {
+ my ($size, $first) = @_;
+ # match the width of a time: 0.00(0.00+0.00)
+ my $out = sprintf '%15s', human_size($size);
+ $out .= ' ' . relative_change($size, $first) if defined $first;
+ return $out;
+}
+
my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests,
$codespeed, $sortby, $subsection, $reponame);
@@ -181,7 +215,14 @@ sub print_default_results {
my $firstr;
for my $i (0..$#dirs) {
my $d = $dirs[$i];
- $times{$prefixes{$d}.$t} = [get_times("$resultsdir/$prefixes{$d}$t.times")];
+ my $base = "$resultsdir/$prefixes{$d}$t";
+ $times{$prefixes{$d}.$t} = [];
+ foreach my $type (qw(times size)) {
+ if (-e "$base.$type") {
+ $times{$prefixes{$d}.$t} = [get_times("$base.$type")];
+ last;
+ }
+ }
my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}};
my $w = length format_times($r,$u,$s,$firstr);
$colwidth[$i] = $w if $w > $colwidth[$i];
diff --git a/t/perf/p0001-rev-list.sh b/t/perf/p0001-rev-list.sh
index ebf172401b..3042a85666 100755
--- a/t/perf/p0001-rev-list.sh
+++ b/t/perf/p0001-rev-list.sh
@@ -14,6 +14,24 @@ test_perf 'rev-list --all --objects' '
git rev-list --all --objects >/dev/null
'
+test_perf 'rev-list --parents' '
+ git rev-list --parents HEAD >/dev/null
+'
+
+test_expect_success 'create dummy file' '
+ echo unlikely-to-already-be-there >dummy &&
+ git add dummy &&
+ git commit -m dummy
+'
+
+test_perf 'rev-list -- dummy' '
+ git rev-list HEAD -- dummy
+'
+
+test_perf 'rev-list --parents -- dummy' '
+ git rev-list --parents HEAD -- dummy
+'
+
test_expect_success 'create new unreferenced commit' '
commit=$(git commit-tree HEAD^{tree} -p HEAD) &&
test_export commit
diff --git a/t/perf/p1450-fsck.sh b/t/perf/p1450-fsck.sh
new file mode 100755
index 0000000000..ae1b84198b
--- /dev/null
+++ b/t/perf/p1450-fsck.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+test_description='Test fsck performance'
+
+. ./perf-lib.sh
+
+test_perf_large_repo
+
+test_perf 'fsck' '
+ git fsck
+'
+
+test_done
diff --git a/t/perf/p1451-fsck-skip-list.sh b/t/perf/p1451-fsck-skip-list.sh
new file mode 100755
index 0000000000..c2b97d2487
--- /dev/null
+++ b/t/perf/p1451-fsck-skip-list.sh
@@ -0,0 +1,40 @@
+#!/bin/sh
+
+test_description='Test fsck skipList performance'
+
+. ./perf-lib.sh
+
+test_perf_fresh_repo
+
+n=1000000
+
+test_expect_success "setup $n bad commits" '
+ for i in $(test_seq 1 $n)
+ do
+ echo "commit refs/heads/master" &&
+ echo "committer C <c@example.com> 1234567890 +0000" &&
+ echo "data <<EOF" &&
+ echo "$i.Q." &&
+ echo "EOF"
+ done | q_to_nul | git fast-import
+'
+
+skip=0
+while test $skip -le $n
+do
+ test_expect_success "create skipList for $skip bad commits" '
+ git log --format=%H --max-count=$skip |
+ sort >skiplist
+ '
+
+ test_perf "fsck with $skip skipped bad commits" '
+ git -c fsck.skipList=skiplist fsck
+ '
+
+ case $skip in
+ 0) skip=1 ;;
+ *) skip=${skip}0 ;;
+ esac
+done
+
+test_done
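
[Illustrative aside, not part of the patch: the loop above exercises skip values of 0, 1, 10, 100, ... up to $n, and for each value the skipList is simply a sorted list of object names, one per line. Built and used by hand, that might look like:]

	$ git log --format=%H --max-count=10 | sort >skiplist
	$ git -c fsck.skipList=skiplist fsck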
diff --git a/t/perf/p3400-rebase.sh b/t/perf/p3400-rebase.sh
index ce271ca4c1..d202aaed06 100755
--- a/t/perf/p3400-rebase.sh
+++ b/t/perf/p3400-rebase.sh
@@ -6,9 +6,9 @@ test_description='Tests rebase performance'
test_perf_default_repo
test_expect_success 'setup rebasing on top of a lot of changes' '
- git checkout -f -b base &&
- git checkout -b to-rebase &&
- git checkout -b upstream &&
+ git checkout -f -B base &&
+ git checkout -B to-rebase &&
+ git checkout -B upstream &&
for i in $(seq 100)
do
# simulate huge diffs
@@ -35,8 +35,8 @@ test_perf 'rebase on top of a lot of unrelated changes' '
test_expect_success 'setup rebasing many changes without split-index' '
git config core.splitIndex false &&
- git checkout -b upstream2 to-rebase &&
- git checkout -b to-rebase2 upstream
+ git checkout -B upstream2 to-rebase &&
+ git checkout -B to-rebase2 upstream
'
test_perf 'rebase a lot of unrelated changes without split-index' '
diff --git a/t/perf/p5302-pack-index.sh b/t/perf/p5302-pack-index.sh
index 99bdb16c85..a9b3e112d9 100755
--- a/t/perf/p5302-pack-index.sh
+++ b/t/perf/p5302-pack-index.sh
@@ -13,35 +13,40 @@ test_expect_success 'repack' '
export PACK
'
-test_expect_success 'create target repositories' '
- for repo in t1 t2 t3 t4 t5 t6
- do
- git init --bare $repo
- done
-'
-
test_perf 'index-pack 0 threads' '
- GIT_DIR=t1 git index-pack --threads=1 --stdin < $PACK
+ rm -rf repo.git &&
+ git init --bare repo.git &&
+ GIT_DIR=repo.git git index-pack --threads=1 --stdin < $PACK
'
test_perf 'index-pack 1 thread ' '
- GIT_DIR=t2 GIT_FORCE_THREADS=1 git index-pack --threads=1 --stdin < $PACK
+ rm -rf repo.git &&
+ git init --bare repo.git &&
+ GIT_DIR=repo.git GIT_FORCE_THREADS=1 git index-pack --threads=1 --stdin < $PACK
'
test_perf 'index-pack 2 threads' '
- GIT_DIR=t3 git index-pack --threads=2 --stdin < $PACK
+ rm -rf repo.git &&
+ git init --bare repo.git &&
+ GIT_DIR=repo.git git index-pack --threads=2 --stdin < $PACK
'
test_perf 'index-pack 4 threads' '
- GIT_DIR=t4 git index-pack --threads=4 --stdin < $PACK
+ rm -rf repo.git &&
+ git init --bare repo.git &&
+ GIT_DIR=repo.git git index-pack --threads=4 --stdin < $PACK
'
test_perf 'index-pack 8 threads' '
- GIT_DIR=t5 git index-pack --threads=8 --stdin < $PACK
+ rm -rf repo.git &&
+ git init --bare repo.git &&
+ GIT_DIR=repo.git git index-pack --threads=8 --stdin < $PACK
'
test_perf 'index-pack default number of threads' '
- GIT_DIR=t6 git index-pack --stdin < $PACK
+ rm -rf repo.git &&
+ git init --bare repo.git &&
+ GIT_DIR=repo.git git index-pack --stdin < $PACK
'
test_done
diff --git a/t/perf/p5304-prune.sh b/t/perf/p5304-prune.sh
new file mode 100755
index 0000000000..83baedb8a4
--- /dev/null
+++ b/t/perf/p5304-prune.sh
@@ -0,0 +1,35 @@
+#!/bin/sh
+
+test_description='performance tests of prune'
+. ./perf-lib.sh
+
+test_perf_default_repo
+
+test_expect_success 'remove reachable loose objects' '
+ git repack -ad
+'
+
+test_expect_success 'remove unreachable loose objects' '
+ git prune
+'
+
+test_expect_success 'confirm there are no loose objects' '
+ git count-objects | grep ^0
+'
+
+test_perf 'prune with no objects' '
+ git prune
+'
+
+test_expect_success 'repack with bitmaps' '
+ git repack -adb
+'
+
+# We have to create the object in each trial run, since otherwise
+# runs after the first see no object and just skip the traversal entirely!
+test_perf 'prune with bitmaps' '
+ echo "probably not present in repo" | git hash-object -w --stdin &&
+ git prune
+'
+
+test_done
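
[Illustrative aside, not part of the patch: the bitmap case re-creates the object inside the timed block because each trial run prunes it again. A by-hand round trip, with a placeholder object id, might look like:]

	$ echo "probably not present in repo" | git hash-object -w --stdin
	<object id>
	$ git prune
	$ git count-objects
	0 objects, 0 kilobytes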
diff --git a/t/perf/p5310-pack-bitmaps.sh b/t/perf/p5310-pack-bitmaps.sh
index bb91dbb173..6a3a42531b 100755
--- a/t/perf/p5310-pack-bitmaps.sh
+++ b/t/perf/p5310-pack-bitmaps.sh
@@ -12,8 +12,7 @@ test_perf_large_repo
# We intentionally use the deprecated pack.writebitmaps
# config so that we can test against older versions of git.
test_expect_success 'setup bitmap config' '
- git config pack.writebitmaps true &&
- git config pack.writebitmaphashcache true
+ git config pack.writebitmaps true
'
test_perf 'repack to disk' '
diff --git a/t/perf/p5311-pack-bitmaps-fetch.sh b/t/perf/p5311-pack-bitmaps-fetch.sh
new file mode 100755
index 0000000000..47c3fd7581
--- /dev/null
+++ b/t/perf/p5311-pack-bitmaps-fetch.sh
@@ -0,0 +1,44 @@
+#!/bin/sh
+
+test_description='performance of fetches from bitmapped packs'
+. ./perf-lib.sh
+
+test_perf_default_repo
+
+test_expect_success 'create bitmapped server repo' '
+ git config pack.writebitmaps true &&
+ git repack -ad
+'
+
+# simulate a fetch from a repository that last fetched N days ago, for
+# various values of N. We do so by following the first-parent chain,
+# and assume the first entry in the chain that is N days older than the current
+# HEAD is where the HEAD would have been then.
+for days in 1 2 4 8 16 32 64 128; do
+ title=$(printf '%10s' "($days days)")
+ test_expect_success "setup revs from $days days ago" '
+ now=$(git log -1 --format=%ct HEAD) &&
+ then=$(($now - ($days * 86400))) &&
+ tip=$(git rev-list -1 --first-parent --until=$then HEAD) &&
+ {
+ echo HEAD &&
+ echo ^$tip
+ } >revs
+ '
+
+ test_perf "server $title" '
+ git pack-objects --stdout --revs \
+ --thin --delta-base-offset \
+ <revs >tmp.pack
+ '
+
+ test_size "size $title" '
+ wc -c <tmp.pack
+ '
+
+ test_perf "client $title" '
+ git index-pack --stdin --fix-thin <tmp.pack
+ '
+done
+
+test_done
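
[Usage note, not part of the patch: scripts like this one are normally driven through the perf harness, which also aggregates the results afterwards; see t/perf/README for the exact invocation. A typical session, with revisions chosen purely for illustration, might be:]

	$ cd t/perf
	$ ./run origin/master HEAD p5311-pack-bitmaps-fetch.sh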
diff --git a/t/perf/p5600-partial-clone.sh b/t/perf/p5600-partial-clone.sh
new file mode 100755
index 0000000000..3e04bd2ae1
--- /dev/null
+++ b/t/perf/p5600-partial-clone.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+test_description='performance of partial clones'
+. ./perf-lib.sh
+
+test_perf_default_repo
+
+test_expect_success 'enable server-side config' '
+ git config uploadpack.allowFilter true &&
+ git config uploadpack.allowAnySHA1InWant true
+'
+
+test_perf 'clone without blobs' '
+ rm -rf bare.git &&
+ git clone --no-local --bare --filter=blob:none . bare.git
+'
+
+test_perf 'checkout of result' '
+ rm -rf worktree &&
+ mkdir -p worktree/.git &&
+ tar -C bare.git -cf - . | tar -C worktree/.git -xf - &&
+ git -C worktree config core.bare false &&
+ git -C worktree checkout -f
+'
+
+test_done
diff --git a/t/perf/perf-lib.sh b/t/perf/perf-lib.sh
index e4c343a6b7..169f92eae3 100644
--- a/t/perf/perf-lib.sh
+++ b/t/perf/perf-lib.sh
@@ -17,37 +17,25 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/ .
-# do the --tee work early; it otherwise confuses our careful
-# GIT_BUILD_DIR mangling
-case "$GIT_TEST_TEE_STARTED, $* " in
-done,*)
- # do not redirect again
- ;;
-*' --tee '*|*' --va'*)
- mkdir -p test-results
- BASE=test-results/$(basename "$0" .sh)
- (GIT_TEST_TEE_STARTED=done ${SHELL-sh} "$0" "$@" 2>&1;
- echo $? > $BASE.exit) | tee $BASE.out
- test "$(cat $BASE.exit)" = 0
- exit
- ;;
-esac
-
+# These variables must be set before the inclusion of test-lib.sh below,
+# because it will change our working directory.
TEST_DIRECTORY=$(pwd)/..
TEST_OUTPUT_DIRECTORY=$(pwd)
-if test -z "$GIT_TEST_INSTALLED"; then
- perf_results_prefix=
-else
- perf_results_prefix=$(printf "%s" "${GIT_TEST_INSTALLED%/bin-wrappers}" | tr -c "[a-zA-Z0-9]" "[_*]")"."
- # make the tested dir absolute
- GIT_TEST_INSTALLED=$(cd "$GIT_TEST_INSTALLED" && pwd)
-fi
+ABSOLUTE_GIT_TEST_INSTALLED=$(
+ test -n "$GIT_TEST_INSTALLED" && cd "$GIT_TEST_INSTALLED" && pwd)
TEST_NO_CREATE_REPO=t
TEST_NO_MALLOC_CHECK=t
. ../test-lib.sh
+if test -z "$GIT_TEST_INSTALLED"; then
+ perf_results_prefix=
+else
+ perf_results_prefix=$(printf "%s" "${GIT_TEST_INSTALLED%/bin-wrappers}" | tr -c "[a-zA-Z0-9]" "[_*]")"."
+ GIT_TEST_INSTALLED=$ABSOLUTE_GIT_TEST_INSTALLED
+fi
+
# Variables from test-lib that are normally internal to the tests; we
# need to export them for test_perf subshells
export TEST_DIRECTORY TRASH_DIRECTORY GIT_BUILD_DIR GIT_TEST_CMP
@@ -82,7 +70,7 @@ test_perf_do_repo_symlink_config_ () {
test_perf_create_repo_from () {
test "$#" = 2 ||
- error "bug in the test script: not 2 parameters to test-create-repo"
+ BUG "not 2 parameters to test-create-repo"
repo="$1"
source="$2"
source_git="$("$MODERN_GIT" -C "$source" rev-parse --git-dir)"
@@ -179,47 +167,69 @@ exit $ret' >&3 2>&4
return "$eval_ret"
}
-
-test_perf () {
+test_wrapper_ () {
+ test_wrapper_func_=$1; shift
test_start_
test "$#" = 3 && { test_prereq=$1; shift; } || test_prereq=
test "$#" = 2 ||
- error "bug in the test script: not 2 or 3 parameters to test-expect-success"
+ BUG "not 2 or 3 parameters to test-expect-success"
export test_prereq
if ! test_skip "$@"
then
base=$(basename "$0" .sh)
echo "$test_count" >>"$perf_results_dir"/$base.subtests
echo "$1" >"$perf_results_dir"/$base.$test_count.descr
- if test -z "$verbose"; then
- printf "%s" "perf $test_count - $1:"
- else
- echo "perf $test_count - $1:"
- fi
- for i in $(test_seq 1 $GIT_PERF_REPEAT_COUNT); do
- say >&3 "running: $2"
- if test_run_perf_ "$2"
- then
- if test -z "$verbose"; then
- printf " %s" "$i"
- else
- echo "* timing run $i/$GIT_PERF_REPEAT_COUNT:"
- fi
+ base="$perf_results_dir"/"$perf_results_prefix$(basename "$0" .sh)"."$test_count"
+ "$test_wrapper_func_" "$@"
+ fi
+
+ test_finish_
+}
+
+test_perf_ () {
+ if test -z "$verbose"; then
+ printf "%s" "perf $test_count - $1:"
+ else
+ echo "perf $test_count - $1:"
+ fi
+ for i in $(test_seq 1 $GIT_PERF_REPEAT_COUNT); do
+ say >&3 "running: $2"
+ if test_run_perf_ "$2"
+ then
+ if test -z "$verbose"; then
+ printf " %s" "$i"
else
- test -z "$verbose" && echo
- test_failure_ "$@"
- break
+ echo "* timing run $i/$GIT_PERF_REPEAT_COUNT:"
fi
- done
- if test -z "$verbose"; then
- echo " ok"
else
- test_ok_ "$1"
+ test -z "$verbose" && echo
+ test_failure_ "$@"
+ break
fi
- base="$perf_results_dir"/"$perf_results_prefix$(basename "$0" .sh)"."$test_count"
- "$TEST_DIRECTORY"/perf/min_time.perl test_time.* >"$base".times
+ done
+ if test -z "$verbose"; then
+ echo " ok"
+ else
+ test_ok_ "$1"
fi
- test_finish_
+ "$TEST_DIRECTORY"/perf/min_time.perl test_time.* >"$base".times
+}
+
+test_perf () {
+ test_wrapper_ test_perf_ "$@"
+}
+
+test_size_ () {
+ say >&3 "running: $2"
+ if test_eval_ "$2" 3>"$base".size; then
+ test_ok_ "$1"
+ else
+ test_failure_ "$@"
+ fi
+}
+
+test_size () {
+ test_wrapper_ test_size_ "$@"
}
# We extend test_done to print timings at the end (./run disables this
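
[Editorial summary, not part of the patch: with this refactoring, test_perf_ still distills its repeated timings into "$base".times via min_time.perl, while test_size_ captures the test's numeric output into "$base".size, and aggregate.perl reads whichever of the two files exists for each test. A hypothetical results layout after one run of the new p5311 script, assuming no GIT_TEST_INSTALLED prefix, could look like:]

	test-results/p5311-pack-bitmaps-fetch.subtests
	test-results/p5311-pack-bitmaps-fetch.1.descr    # "server   (1 days)"
	test-results/p5311-pack-bitmaps-fetch.1.times    # e.g. "0:00.37 0.79 0.02"
	test-results/p5311-pack-bitmaps-fetch.2.descr    # "size   (1 days)"
	test-results/p5311-pack-bitmaps-fetch.2.size     # e.g. "4521984"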