Diffstat (limited to 't/test-lib-functions.sh')
-rw-r--r--	t/test-lib-functions.sh	98
1 file changed, 78 insertions(+), 20 deletions(-)
diff --git a/t/test-lib-functions.sh b/t/test-lib-functions.sh
index 0698ce7908..8d99eb303f 100644
--- a/t/test-lib-functions.sh
+++ b/t/test-lib-functions.sh
@@ -145,6 +145,14 @@ test_pause () {
fi
}
+# Wrap git in gdb. Adding this to a command can make it easier to
+# understand what is going on in a failing test.
+#
+# Example: "debug git checkout master".
+debug () {
+ GIT_TEST_GDB=1 "$@"
+}
+
# Call test_commit with the arguments "<message> [<file> [<contents> [<tag>]]]"
#
# This will commit a file with the given contents and the given commit
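The new "debug" helper just sets GIT_TEST_GDB=1 for a single command; the variable is consumed elsewhere in the test setup so that the command runs under gdb. A sketch of how it might be used while chasing a failure (the test title and command are made up):

	test_expect_success 'checkout switches branches' '
		# hypothetical: drop into gdb if this invocation misbehaves
		debug git checkout master
	'
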
@@ -201,7 +209,14 @@ test_chmod () {
# Unset a configuration variable, but don't fail if it doesn't exist.
test_unconfig () {
- git config --unset-all "$@"
+ config_dir=
+ if test "$1" = -C
+ then
+ shift
+ config_dir=$1
+ shift
+ fi
+ git ${config_dir:+-C "$config_dir"} config --unset-all "$@"
config_status=$?
case "$config_status" in
5) # ok, nothing to unset
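With the optional -C handling, test_unconfig can now clean up configuration in a repository other than the current one, still treating "nothing to unset" (exit code 5 from git config) as success. A hypothetical call, with "sub" standing in for any repository directory:

	test_unconfig -C sub core.abbrev
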
@@ -213,8 +228,15 @@ test_unconfig () {
# Set git config, automatically unsetting it after the test is over.
test_config () {
- test_when_finished "test_unconfig '$1'" &&
- git config "$@"
+ config_dir=
+ if test "$1" = -C
+ then
+ shift
+ config_dir=$1
+ shift
+ fi
+ test_when_finished "test_unconfig ${config_dir:+-C '$config_dir'} '$1'" &&
+ git ${config_dir:+-C "$config_dir"} config "$@"
}
test_config_global () {
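test_config grows the same optional -C prefix, and the test_when_finished handler it registers forwards -C to test_unconfig so the value is removed from the same repository once the test is over. Illustrative usage, with a made-up repository and key:

	test_config -C sub core.ignorecase true
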
@@ -348,11 +370,18 @@ test_declared_prereq () {
return 1
}
+test_verify_prereq () {
+ test -z "$test_prereq" ||
+ expr >/dev/null "$test_prereq" : '[A-Z0-9_,!]*$' ||
+ error "bug in the test script: '$test_prereq' does not look like a prereq"
+}
+
test_expect_failure () {
test_start_
test "$#" = 3 && { test_prereq=$1; shift; } || test_prereq=
test "$#" = 2 ||
error "bug in the test script: not 2 or 3 parameters to test-expect-failure"
+ test_verify_prereq
export test_prereq
if ! test_skip "$@"
then
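test_verify_prereq rejects anything that does not look like a comma-separated list of uppercase prerequisite names (optionally negated with '!'), which catches, for example, a prerequisite argument accidentally written in lowercase. Two illustrative calls with made-up bodies:

	# accepted: matches [A-Z0-9_,!]*
	test_expect_success SYMLINKS,!MINGW 'uses symlinks' 'test -h link'

	# rejected with "bug in the test script: 'symlinks' does not
	# look like a prereq"
	test_expect_success symlinks 'uses symlinks' 'test -h link'
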
@@ -372,6 +401,7 @@ test_expect_success () {
test "$#" = 3 && { test_prereq=$1; shift; } || test_prereq=
test "$#" = 2 ||
error "bug in the test script: not 2 or 3 parameters to test-expect-success"
+ test_verify_prereq
export test_prereq
if ! test_skip "$@"
then
@@ -400,6 +430,7 @@ test_external () {
error >&5 "bug in the test script: not 3 or 4 parameters to test_external"
descr="$1"
shift
+ test_verify_prereq
export test_prereq
if ! test_skip "$descr" "$@"
then
@@ -478,7 +509,7 @@ test_external_without_stderr () {
test_path_is_file () {
if ! test -f "$1"
then
- echo "File $1 doesn't exist. $*"
+ echo "File $1 doesn't exist. $2"
false
fi
}
@@ -486,7 +517,7 @@ test_path_is_file () {
test_path_is_dir () {
if ! test -d "$1"
then
- echo "Directory $1 doesn't exist. $*"
+ echo "Directory $1 doesn't exist. $2"
false
fi
}
@@ -538,6 +569,21 @@ test_line_count () {
fi
}
+# Returns success if a comma separated string of keywords ($1) contains a
+# given keyword ($2).
+# Examples:
+# `list_contains "foo,bar" bar` returns 0
+# `list_contains "foo" bar` returns 1
+
+list_contains () {
+ case ",$1," in
+ *,$2,*)
+ return 0
+ ;;
+ esac
+ return 1
+}
+
# This is not among top-level (test_expect_success | test_expect_failure)
# but is a prefix that can be used in the test script, like:
#
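list_contains is a small case-pattern membership check over a comma-separated list; it exists so that test_must_fail below can parse its ok= argument. For example:

	list_contains "success,sigpipe" sigpipe	# exits 0
	list_contains "success" sigpipe		# exits 1
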
@@ -551,18 +597,34 @@ test_line_count () {
# the failure could be due to a segv. We want a controlled failure.
test_must_fail () {
+ case "$1" in
+ ok=*)
+ _test_ok=${1#ok=}
+ shift
+ ;;
+ *)
+ _test_ok=
+ ;;
+ esac
"$@"
exit_code=$?
- if test $exit_code = 0; then
+ if test $exit_code -eq 0 && ! list_contains "$_test_ok" success
+ then
echo >&2 "test_must_fail: command succeeded: $*"
return 1
- elif test $exit_code -gt 129 && test $exit_code -le 192; then
- echo >&2 "test_must_fail: died by signal: $*"
+ elif test $exit_code -eq 141 && list_contains "$_test_ok" sigpipe
+ then
+ return 0
+ elif test $exit_code -gt 129 && test $exit_code -le 192
+ then
+ echo >&2 "test_must_fail: died by signal $(($exit_code - 128)): $*"
return 1
- elif test $exit_code = 127; then
+ elif test $exit_code -eq 127
+ then
echo >&2 "test_must_fail: command not found: $*"
return 1
- elif test $exit_code = 126; then
+ elif test $exit_code -eq 126
+ then
echo >&2 "test_must_fail: valgrind error: $*"
return 1
fi
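The new ok= prefix widens the set of exit statuses test_must_fail will accept; everything after "ok=" is a comma-separated keyword list checked with list_contains. The calls below only illustrate the syntax; the commands themselves are made up:

	# plain use: the command must fail (but not by signal)
	test_must_fail git rev-parse --verify no-such-ref

	# ok=success: succeeding is also acceptable
	test_must_fail ok=success git config --unset not.there

	# ok=success,sigpipe: exit code 141 (SIGPIPE) is tolerated too
	test_must_fail ok=success,sigpipe git push --all downstream
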
@@ -581,16 +643,7 @@ test_must_fail () {
# because we want to notice if it fails due to segv.
test_might_fail () {
- "$@"
- exit_code=$?
- if test $exit_code -gt 129 && test $exit_code -le 192; then
- echo >&2 "test_might_fail: died by signal: $*"
- return 1
- elif test $exit_code = 127; then
- echo >&2 "test_might_fail: command not found: $*"
- return 1
- fi
- return 0
+ test_must_fail ok=success "$@"
}
# Similar to test_must_fail and test_might_fail, but check that a
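Since test_might_fail is now a one-line wrapper, these two hypothetical calls behave identically:

	test_might_fail git config --unset not.there
	test_must_fail ok=success git config --unset not.there
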
@@ -713,6 +766,11 @@ test_seq () {
# what went wrong.
test_when_finished () {
+ # We cannot detect when we are in a subshell in general, but by
+ # doing so on Bash is better than nothing (the test will
+ # silently pass on other shells).
+ test "${BASH_SUBSHELL-0}" = 0 ||
+ error "bug in test script: test_when_finished does nothing in a subshell"
test_cleanup="{ $*
} && (exit \"\$eval_ret\"); eval_ret=\$?; $test_cleanup"
}
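The BASH_SUBSHELL guard catches cleanup that would otherwise be silently lost: inside ( ... ) the assignment to test_cleanup only changes the subshell's copy, which vanishes when the subshell exits. Under bash, a sketch like the following now aborts with "bug in test script"; other shells still let it pass unnoticed:

	(
		cd sub &&
		test_when_finished "rm -f sub/stamp" &&	# error under bash
		echo data >stamp
	)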