diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 06364de3ef..fcb8aedc27 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -247,11 +247,9 @@ jobs:
env:
PHP_CODESNIFFER_CBF: '1'
- # Note: The code style check is run multiple times against every PHP version
- # as it also acts as an integration test.
- - name: 'PHPCS: check code style without cache, no parallel'
+ - name: "Run bashunit tests"
if: ${{ matrix.custom_ini == false }}
- run: php "bin/phpcs" --no-cache --parallel=1
+ run: "./scripts/bashunit -p tests/EndToEnd"
- name: Download the PHPCS phar
if: ${{ matrix.custom_ini == false }}
diff --git a/scripts/bashunit b/scripts/bashunit
new file mode 100755
index 0000000000..d934b90a06
--- /dev/null
+++ b/scripts/bashunit
@@ -0,0 +1,3400 @@
+#!/usr/bin/env bash
+# check_os.sh
+
+# shellcheck disable=SC2034
+_OS="Unknown"
+_DISTRO="Unknown"
+
+function check_os::init() {
+ if check_os::is_linux; then
+ _OS="Linux"
+ if check_os::is_ubuntu; then
+ _DISTRO="Ubuntu"
+ elif check_os::is_alpine; then
+ _DISTRO="Alpine"
+ else
+ _DISTRO="Other"
+ fi
+ elif check_os::is_macos; then
+ _OS="OSX"
+ elif check_os::is_windows; then
+ _OS="Windows"
+ else
+ _OS="Unknown"
+ _DISTRO="Unknown"
+ fi
+}
+
+function check_os::is_ubuntu() {
+ command -v apt > /dev/null
+}
+
+function check_os::is_alpine() {
+ command -v apk > /dev/null
+}
+
+function check_os::is_linux() {
+ [[ "$(uname)" == "Linux" ]]
+}
+
+function check_os::is_macos() {
+ [[ "$(uname)" == "Darwin" ]]
+}
+
+function check_os::is_windows() {
+ [[ "$(uname)" == *"MINGW"* ]]
+}
+
+function check_os::is_busybox() {
+  case "$_DISTRO" in
+ "Alpine")
+ return 0
+ ;;
+ *)
+ return 1
+ ;;
+ esac
+}
+
+check_os::init
+
+export _OS
+export _DISTRO
+export -f check_os::is_alpine
+export -f check_os::is_busybox
+export -f check_os::is_ubuntu
+
+# str.sh
+
+function str::rpad() {
+ local left_text="$1"
+ local right_word="$2"
+ local width_padding="${3:-$TERMINAL_WIDTH}"
+ # Subtract 1 more to account for the extra space
+ local padding=$((width_padding - ${#right_word} - 1))
+
+ # Remove ANSI escape sequences (non-visible characters) for length calculation
+ # shellcheck disable=SC2155
+ local clean_left_text=$(echo -e "$left_text" | sed 's/\x1b\[[0-9;]*m//g')
+
+ local is_truncated=false
+ # If the visible left text exceeds the padding, truncate it and add "..."
+ if [[ ${#clean_left_text} -gt $padding ]]; then
+ local truncation_length=$((padding - 3)) # Subtract 3 for "..."
+ clean_left_text="${clean_left_text:0:$truncation_length}"
+ is_truncated=true
+ fi
+
+ # Rebuild the text with ANSI codes intact, preserving the truncation
+ local result_left_text=""
+ local i=0
+ local j=0
+ while [[ $i -lt ${#clean_left_text} && $j -lt ${#left_text} ]]; do
+ local char="${clean_left_text:$i:1}"
+ local original_char="${left_text:$j:1}"
+
+ # If the current character is part of an ANSI sequence, skip it and copy it
+ if [[ "$original_char" == $'\x1b' ]]; then
+ while [[ "${left_text:$j:1}" != "m" && $j -lt ${#left_text} ]]; do
+ result_left_text+="${left_text:$j:1}"
+ ((j++))
+ done
+ result_left_text+="${left_text:$j:1}" # Append the final 'm'
+ ((j++))
+ elif [[ "$char" == "$original_char" ]]; then
+ # Match the actual character
+ result_left_text+="$char"
+ ((i++))
+ ((j++))
+ else
+ ((j++))
+ fi
+ done
+
+ local remaining_space
+ if $is_truncated ; then
+ result_left_text+="..."
+ # 1: due to a blank space
+ # 3: due to the appended ...
+ remaining_space=$((width_padding - ${#clean_left_text} - ${#right_word} - 1 - 3))
+ else
+ # Copy any remaining characters after the truncation point
+ result_left_text+="${left_text:$j}"
+ remaining_space=$((width_padding - ${#clean_left_text} - ${#right_word} - 1))
+ fi
+
+ # Ensure the right word is placed exactly at the far right of the screen
+ # filling the remaining space with padding
+ if [[ $remaining_space -lt 0 ]]; then
+ remaining_space=0
+ fi
+
+ printf "%s%${remaining_space}s %s\n" "$result_left_text" "" "$right_word"
+}
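+
+# Usage sketch (illustrative): right-align a duration within an 80-column line.
+#   str::rpad "✓ Passed: my test" "3 ms" 80
+#   # prints the left text padded so that "3 ms" ends at column 80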
+
+# globals.sh
+set -euo pipefail
+
+# This file provides a set of global functions to developers.
+
+function current_dir() {
+ dirname "${BASH_SOURCE[1]}"
+}
+
+function current_filename() {
+ basename "${BASH_SOURCE[1]}"
+}
+
+function caller_filename() {
+ dirname "${BASH_SOURCE[2]}"
+}
+
+function caller_line() {
+ echo "${BASH_LINENO[1]}"
+}
+
+function current_timestamp() {
+ date +"%Y-%m-%d %H:%M:%S"
+}
+
+function is_command_available() {
+ command -v "$1" >/dev/null 2>&1
+}
+
+function random_str() {
+ local length=${1:-6}
+ local chars='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
+ local str=''
+ for (( i=0; i> "$BASHUNIT_DEV_LOG"
+}
+
+# dependencies.sh
+set -euo pipefail
+
+function dependencies::has_perl() {
+ command -v perl >/dev/null 2>&1
+}
+
+function dependencies::has_powershell() {
+ command -v powershell > /dev/null 2>&1
+}
+
+function dependencies::has_adjtimex() {
+ command -v adjtimex >/dev/null 2>&1
+}
+
+function dependencies::has_bc() {
+ command -v bc >/dev/null 2>&1
+}
+
+function dependencies::has_awk() {
+ command -v awk >/dev/null 2>&1
+}
+
+function dependencies::has_git() {
+ command -v git >/dev/null 2>&1
+}
+
+function dependencies::has_curl() {
+ command -v curl >/dev/null 2>&1
+}
+
+function dependencies::has_wget() {
+ command -v wget >/dev/null 2>&1
+}
+
+# io.sh
+
+function io::download_to() {
+ local url="$1"
+ local output="$2"
+ if dependencies::has_curl; then
+ curl -L -J -o "$output" "$url" 2>/dev/null
+ elif dependencies::has_wget; then
+ wget -q -O "$output" "$url" 2>/dev/null
+ else
+ return 1
+ fi
+}
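+
+# Example (illustrative; placeholder URL): download with whichever of
+# curl/wget is installed, or fail when neither exists.
+#   io::download_to "https://example.com/release.tar.gz" "/tmp/release.tar.gz" \
+#     || echo "no curl or wget available"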
+
+# math.sh
+
+if dependencies::has_bc; then
+  # Prefer bc over awk: bc uses arbitrary-precision arithmetic, so large
+  # nanosecond values do not overflow.
+ function math::calculate() {
+ echo "$*" | bc
+ }
+elif dependencies::has_awk; then
+ function math::calculate() {
+ awk "BEGIN { print ""$*"" }"
+ }
+fi
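+
+# Example (illustrative): both backends evaluate an arithmetic expression
+# passed as a string, e.g. converting a nanosecond delta to milliseconds:
+#   math::calculate "(1720000000123000000 - 1720000000000000000) / 1000000"
+#   # => 123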
+
+# parallel.sh
+
+function parallel::aggregate_test_results() {
+ local temp_dir_parallel_test_suite=$1
+
+ local total_failed=0
+ local total_passed=0
+ local total_skipped=0
+ local total_incomplete=0
+ local total_snapshot=0
+
+ for script_dir in "$temp_dir_parallel_test_suite"/*; do
+ if ! compgen -G "$script_dir"/*.result > /dev/null; then
+ printf "%sNo tests found%s" "$_COLOR_SKIPPED" "$_COLOR_DEFAULT"
+ continue
+ fi
+
+ for result_file in "$script_dir"/*.result; do
+ local result_line
+ result_line=$(tail -n 1 "$result_file")
+
+ local failed="${result_line##*##ASSERTIONS_FAILED=}"
+ failed="${failed%%##*}"; failed=${failed:-0}
+
+ local passed="${result_line##*##ASSERTIONS_PASSED=}"
+ passed="${passed%%##*}"; passed=${passed:-0}
+
+ local skipped="${result_line##*##ASSERTIONS_SKIPPED=}"
+ skipped="${skipped%%##*}"; skipped=${skipped:-0}
+
+ local incomplete="${result_line##*##ASSERTIONS_INCOMPLETE=}"
+ incomplete="${incomplete%%##*}"; incomplete=${incomplete:-0}
+
+ local snapshot="${result_line##*##ASSERTIONS_SNAPSHOT=}"
+ snapshot="${snapshot%%##*}"; snapshot=${snapshot:-0}
+
+ # Add to the total counts
+ total_failed=$((total_failed + failed))
+ total_passed=$((total_passed + passed))
+ total_skipped=$((total_skipped + skipped))
+ total_incomplete=$((total_incomplete + incomplete))
+ total_snapshot=$((total_snapshot + snapshot))
+
+ if [ "${failed:-0}" -gt 0 ]; then
+ state::add_tests_failed
+ continue
+ fi
+
+ if [ "${snapshot:-0}" -gt 0 ]; then
+ state::add_tests_snapshot
+ continue
+ fi
+
+ if [ "${incomplete:-0}" -gt 0 ]; then
+ state::add_tests_incomplete
+ continue
+ fi
+
+ if [ "${skipped:-0}" -gt 0 ]; then
+ state::add_tests_skipped
+ continue
+ fi
+
+ state::add_tests_passed
+ done
+ done
+
+ export _ASSERTIONS_FAILED=$total_failed
+ export _ASSERTIONS_PASSED=$total_passed
+ export _ASSERTIONS_SKIPPED=$total_skipped
+ export _ASSERTIONS_INCOMPLETE=$total_incomplete
+ export _ASSERTIONS_SNAPSHOT=$total_snapshot
+}
+
+function parallel::mark_stop_on_failure() {
+ touch "$TEMP_FILE_PARALLEL_STOP_ON_FAILURE"
+}
+
+function parallel::must_stop_on_failure() {
+ [[ -f "$TEMP_FILE_PARALLEL_STOP_ON_FAILURE" ]]
+}
+
+function parallel::reset() {
+ # shellcheck disable=SC2153
+ rm -rf "$TEMP_DIR_PARALLEL_TEST_SUITE"
+ [ -f "$TEMP_FILE_PARALLEL_STOP_ON_FAILURE" ] && rm "$TEMP_FILE_PARALLEL_STOP_ON_FAILURE"
+}
+
+function parallel::is_enabled() {
+ if env::is_parallel_run_enabled && \
+ (check_os::is_macos || check_os::is_ubuntu || check_os::is_windows); then
+ return 0
+ fi
+ return 1
+}
+
+# env.sh
+
+# shellcheck disable=SC2034
+
+set -o allexport
+# shellcheck source=/dev/null
+[[ -f ".env" ]] && source .env set
+set +o allexport
+
+_DEFAULT_DEFAULT_PATH="tests"
+_DEFAULT_BOOTSTRAP="tests/bootstrap.sh"
+_DEFAULT_DEV_LOG=""
+_DEFAULT_LOG_JUNIT=""
+_DEFAULT_REPORT_HTML=""
+
+: "${BASHUNIT_DEFAULT_PATH:=${DEFAULT_PATH:=$_DEFAULT_DEFAULT_PATH}}"
+: "${BASHUNIT_DEV_LOG:=${DEV_LOG:=$_DEFAULT_DEV_LOG}}"
+: "${BASHUNIT_BOOTSTRAP:=${BOOTSTRAP:=$_DEFAULT_BOOTSTRAP}}"
+: "${BASHUNIT_LOG_JUNIT:=${LOG_JUNIT:=$_DEFAULT_LOG_JUNIT}}"
+: "${BASHUNIT_REPORT_HTML:=${REPORT_HTML:=$_DEFAULT_REPORT_HTML}}"
+
+# Booleans
+_DEFAULT_PARALLEL_RUN="false"
+_DEFAULT_SHOW_HEADER="true"
+_DEFAULT_HEADER_ASCII_ART="false"
+_DEFAULT_SIMPLE_OUTPUT="false"
+_DEFAULT_STOP_ON_FAILURE="false"
+_DEFAULT_SHOW_EXECUTION_TIME="true"
+_DEFAULT_VERBOSE="false"
+
+: "${BASHUNIT_PARALLEL_RUN:=${PARALLEL_RUN:=$_DEFAULT_PARALLEL_RUN}}"
+: "${BASHUNIT_SHOW_HEADER:=${SHOW_HEADER:=$_DEFAULT_SHOW_HEADER}}"
+: "${BASHUNIT_HEADER_ASCII_ART:=${HEADER_ASCII_ART:=$_DEFAULT_HEADER_ASCII_ART}}"
+: "${BASHUNIT_SIMPLE_OUTPUT:=${SIMPLE_OUTPUT:=$_DEFAULT_SIMPLE_OUTPUT}}"
+: "${BASHUNIT_STOP_ON_FAILURE:=${STOP_ON_FAILURE:=$_DEFAULT_STOP_ON_FAILURE}}"
+: "${BASHUNIT_SHOW_EXECUTION_TIME:=${SHOW_EXECUTION_TIME:=$_DEFAULT_SHOW_EXECUTION_TIME}}"
+: "${BASHUNIT_VERBOSE:=${VERBOSE:=$_DEFAULT_VERBOSE}}"
+
+function env::is_parallel_run_enabled() {
+ [[ "$BASHUNIT_PARALLEL_RUN" == "true" ]]
+}
+
+function env::is_show_header_enabled() {
+ [[ "$BASHUNIT_SHOW_HEADER" == "true" ]]
+}
+
+function env::is_header_ascii_art_enabled() {
+ [[ "$BASHUNIT_HEADER_ASCII_ART" == "true" ]]
+}
+
+function env::is_simple_output_enabled() {
+ [[ "$BASHUNIT_SIMPLE_OUTPUT" == "true" ]]
+}
+
+function env::is_stop_on_failure_enabled() {
+ [[ "$BASHUNIT_STOP_ON_FAILURE" == "true" ]]
+}
+
+function env::is_show_execution_time_enabled() {
+ [[ "$BASHUNIT_SHOW_EXECUTION_TIME" == "true" ]]
+}
+
+function env::is_dev_mode_enabled() {
+ [[ -n "$BASHUNIT_DEV_LOG" ]]
+}
+
+function env::is_verbose_enabled() {
+ [[ "$BASHUNIT_VERBOSE" == "true" ]]
+}
+
+function env::active_internet_connection() {
+ if ping -c 1 -W 3 google.com &> /dev/null; then
+ return 0
+ fi
+
+ return 1
+}
+
+function env::find_terminal_width() {
+ local cols=""
+
+ if [[ -z "$cols" ]] && command -v stty > /dev/null; then
+ cols=$(tput cols 2>/dev/null)
+ fi
+ if [[ -n "$TERM" ]] && command -v tput > /dev/null; then
+ cols=$(stty size 2>/dev/null | cut -d' ' -f2)
+ fi
+
+ # Directly echo the value with fallback
+ echo "${cols:-100}"
+}
+
+function env::print_verbose() {
+ local keys=(
+ "BASHUNIT_DEFAULT_PATH"
+ "BASHUNIT_DEV_LOG"
+ "BASHUNIT_BOOTSTRAP"
+ "BASHUNIT_LOG_JUNIT"
+ "BASHUNIT_REPORT_HTML"
+ "BASHUNIT_PARALLEL_RUN"
+ "BASHUNIT_SHOW_HEADER"
+ "BASHUNIT_HEADER_ASCII_ART"
+ "BASHUNIT_SIMPLE_OUTPUT"
+ "BASHUNIT_STOP_ON_FAILURE"
+ "BASHUNIT_SHOW_EXECUTION_TIME"
+ "BASHUNIT_VERBOSE"
+ )
+
+ local max_length=0
+
+ for key in "${keys[@]}"; do
+ if (( ${#key} > max_length )); then
+ max_length=${#key}
+ fi
+ done
+
+ for key in "${keys[@]}"; do
+ printf "%s:%*s%s\n" "$key" $((max_length - ${#key} + 1)) "" "${!key}"
+ done
+}
+
+EXIT_CODE_STOP_ON_FAILURE=4
+TEMP_DIR_PARALLEL_TEST_SUITE="/tmp/bashunit/parallel/${_OS:-Unknown}"
+TEMP_FILE_PARALLEL_STOP_ON_FAILURE="$TEMP_DIR_PARALLEL_TEST_SUITE/.stop-on-failure"
+TERMINAL_WIDTH="$(env::find_terminal_width)"
+FAILURES_OUTPUT_PATH=$(mktemp)
+CAT="$(which cat)"
+
+# clock.sh
+
+function clock::now() {
+ if dependencies::has_perl && perl -MTime::HiRes -e "" > /dev/null 2>&1; then
+ if perl -MTime::HiRes -e 'printf("%.0f\n",Time::HiRes::time()*1000000000)'; then
+ return 0
+ fi
+ fi
+
+ if check_os::is_windows && dependencies::has_powershell; then
+ powershell -Command "
+ \$unixEpoch = [DateTime]'1970-01-01 00:00:00';
+ \$now = [DateTime]::UtcNow;
+ \$ticksSinceEpoch = (\$now - \$unixEpoch).Ticks;
+ \$nanosecondsSinceEpoch = \$ticksSinceEpoch * 100;
+ Write-Output \$nanosecondsSinceEpoch
+ "
+ return 0
+ fi
+
+ if ! check_os::is_macos && ! check_os::is_alpine; then
+ local result
+ result=$(date +%s%N)
+ if [[ "$result" != *N ]] && [[ "$result" -gt 0 ]]; then
+ echo "$result"
+ return 0
+ fi
+ fi
+
+ local shell_time has_shell_time
+ shell_time="$(clock::shell_time)"
+ has_shell_time="$?"
+ if [[ "$has_shell_time" -eq 0 ]]; then
+ local seconds microseconds
+ seconds=$(echo "$shell_time" | cut -f 1 -d '.')
+ microseconds=$(echo "$shell_time" | cut -f 2 -d '.')
+
+ math::calculate "($seconds * 1000000000) + ($microseconds * 1000)"
+ return 0
+ fi
+
+ echo ""
+ return 1
+}
+
+function clock::shell_time() {
+ # Get time directly from the shell rather than a program.
+ [[ -n ${EPOCHREALTIME+x} && -n "$EPOCHREALTIME" ]] && LC_ALL=C echo "$EPOCHREALTIME"
+}
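+
+# Example (illustrative): on bash >= 5, EPOCHREALTIME expands to a value like
+# "1720000000.123456" (seconds.microseconds); clock::now splits it on "." and
+# recombines the parts into nanoseconds via math::calculate.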
+
+function clock::total_runtime_in_milliseconds() {
+ end_time=$(clock::now)
+ if [[ -n $end_time ]]; then
+ math::calculate "($end_time-$_START_TIME)/1000000"
+ else
+ echo ""
+ fi
+}
+
+function clock::total_runtime_in_nanoseconds() {
+ end_time=$(clock::now)
+ if [[ -n $end_time ]]; then
+ math::calculate "($end_time-$_START_TIME)"
+ else
+ echo ""
+ fi
+}
+
+function clock::init() {
+ _START_TIME=$(clock::now)
+}
+
+# state.sh
+
+_TESTS_PASSED=0
+_TESTS_FAILED=0
+_TESTS_SKIPPED=0
+_TESTS_INCOMPLETE=0
+_TESTS_SNAPSHOT=0
+_ASSERTIONS_PASSED=0
+_ASSERTIONS_FAILED=0
+_ASSERTIONS_SKIPPED=0
+_ASSERTIONS_INCOMPLETE=0
+_ASSERTIONS_SNAPSHOT=0
+_DUPLICATED_FUNCTION_NAMES=""
+_FILE_WITH_DUPLICATED_FUNCTION_NAMES=""
+_DUPLICATED_TEST_FUNCTIONS_FOUND=false
+_TEST_OUTPUT=""
+_TEST_EXIT_CODE=0
+
+function state::get_tests_passed() {
+ echo "$_TESTS_PASSED"
+}
+
+function state::add_tests_passed() {
+ ((_TESTS_PASSED++)) || true
+}
+
+function state::get_tests_failed() {
+ echo "$_TESTS_FAILED"
+}
+
+function state::add_tests_failed() {
+ ((_TESTS_FAILED++)) || true
+}
+
+function state::get_tests_skipped() {
+ echo "$_TESTS_SKIPPED"
+}
+
+function state::add_tests_skipped() {
+ ((_TESTS_SKIPPED++)) || true
+}
+
+function state::get_tests_incomplete() {
+ echo "$_TESTS_INCOMPLETE"
+}
+
+function state::add_tests_incomplete() {
+ ((_TESTS_INCOMPLETE++)) || true
+}
+
+function state::get_tests_snapshot() {
+ echo "$_TESTS_SNAPSHOT"
+}
+
+function state::add_tests_snapshot() {
+ ((_TESTS_SNAPSHOT++)) || true
+}
+
+function state::get_assertions_passed() {
+ echo "$_ASSERTIONS_PASSED"
+}
+
+function state::add_assertions_passed() {
+ ((_ASSERTIONS_PASSED++)) || true
+}
+
+function state::get_assertions_failed() {
+ echo "$_ASSERTIONS_FAILED"
+}
+
+function state::add_assertions_failed() {
+ ((_ASSERTIONS_FAILED++)) || true
+}
+
+function state::get_assertions_skipped() {
+ echo "$_ASSERTIONS_SKIPPED"
+}
+
+function state::add_assertions_skipped() {
+ ((_ASSERTIONS_SKIPPED++)) || true
+}
+
+function state::get_assertions_incomplete() {
+ echo "$_ASSERTIONS_INCOMPLETE"
+}
+
+function state::add_assertions_incomplete() {
+ ((_ASSERTIONS_INCOMPLETE++)) || true
+}
+
+function state::get_assertions_snapshot() {
+ echo "$_ASSERTIONS_SNAPSHOT"
+}
+
+function state::add_assertions_snapshot() {
+ ((_ASSERTIONS_SNAPSHOT++)) || true
+}
+
+function state::is_duplicated_test_functions_found() {
+ echo "$_DUPLICATED_TEST_FUNCTIONS_FOUND"
+}
+
+function state::set_duplicated_test_functions_found() {
+ _DUPLICATED_TEST_FUNCTIONS_FOUND=true
+}
+
+function state::get_duplicated_function_names() {
+ echo "$_DUPLICATED_FUNCTION_NAMES"
+}
+
+function state::set_duplicated_function_names() {
+ _DUPLICATED_FUNCTION_NAMES="$1"
+}
+
+function state::get_file_with_duplicated_function_names() {
+ echo "$_FILE_WITH_DUPLICATED_FUNCTION_NAMES"
+}
+
+function state::set_file_with_duplicated_function_names() {
+ _FILE_WITH_DUPLICATED_FUNCTION_NAMES="$1"
+}
+
+function state::add_test_output() {
+ _TEST_OUTPUT+="$1"
+}
+
+function state::get_test_exit_code() {
+ echo "$_TEST_EXIT_CODE"
+}
+
+function state::set_test_exit_code() {
+ _TEST_EXIT_CODE="$1"
+}
+
+function state::set_duplicated_functions_merged() {
+ state::set_duplicated_test_functions_found
+ state::set_file_with_duplicated_function_names "$1"
+ state::set_duplicated_function_names "$2"
+}
+
+function state::initialize_assertions_count() {
+ _ASSERTIONS_PASSED=0
+ _ASSERTIONS_FAILED=0
+ _ASSERTIONS_SKIPPED=0
+ _ASSERTIONS_INCOMPLETE=0
+ _ASSERTIONS_SNAPSHOT=0
+ _TEST_OUTPUT=""
+}
+
+function state::export_subshell_context() {
+ local encoded_test_output
+
+ if base64 --help 2>&1 | grep -q -- "-w"; then
+ # Alpine requires the -w 0 option to avoid wrapping
+ encoded_test_output=$(echo -n "$_TEST_OUTPUT" | base64 -w 0)
+ else
+ # macOS and others: default base64 without wrapping
+ encoded_test_output=$(echo -n "$_TEST_OUTPUT" | base64)
+ fi
+
+  cat <<EOF
+##ASSERTIONS_FAILED=$_ASSERTIONS_FAILED##ASSERTIONS_PASSED=$_ASSERTIONS_PASSED##ASSERTIONS_SKIPPED=$_ASSERTIONS_SKIPPED##ASSERTIONS_INCOMPLETE=$_ASSERTIONS_INCOMPLETE##ASSERTIONS_SNAPSHOT=$_ASSERTIONS_SNAPSHOT##TEST_OUTPUT=$encoded_test_output
+EOF
+}
+
+# console_header.sh
+
+function console_header::print_help() {
+  cat <<EOF
+bashunit [arguments] [options]
+
+Options:
+  -a, --assert
+ Run a core assert function standalone without a test context.
+
+ -e, --env, --boot
+ Load a custom file, overriding the existing .env variables or loading a file with global functions.
+
+ -f, --filter
+ Filters the tests to run based on the test name.
+
+ -l, --log-junit
+ Create a report JUnit XML file that contains information about the test results.
+
+ -p, --parallel || --no-parallel [default]
+    Run each test in a child process, randomizing the test execution order.
+
+ -r, --report-html
+ Create a report HTML file that contains information about the test results.
+
+ -s, --simple || --detailed [default]
+ Enables simple or detailed output to the console.
+
+ -S, --stop-on-failure
+    Stop the runner immediately after the first failing test.
+
+ --debug
+ Print all executed shell commands to the terminal.
+    If a file path is passed, the output is redirected to that file.
+
+ -vvv, --verbose
+ Display internal details for each test.
+
+ --version
+ Displays the current version of bashunit.
+
+ --upgrade
+    Upgrade to the latest version of bashunit.
+
+ -h, --help
+ This message.
+
+See more: https://bashunit.typeddevs.com/command-line
+EOF
+}
+
+# console_results.sh
+# shellcheck disable=SC2155
+
+_TOTAL_TESTS_COUNT=0
+
+function console_results::render_result() {
+ if [[ "$(state::is_duplicated_test_functions_found)" == true ]]; then
+ console_results::print_execution_time
+ printf "%s%s%s\n" "${_COLOR_RETURN_ERROR}" "Duplicate test functions found" "${_COLOR_DEFAULT}"
+ printf "File with duplicate functions: %s\n" "$(state::get_file_with_duplicated_function_names)"
+ printf "Duplicate functions: %s\n" "$(state::get_duplicated_function_names)"
+ return 1
+ fi
+
+ if env::is_simple_output_enabled; then
+ printf "\n\n"
+ fi
+
+ local total_tests=0
+ ((total_tests += $(state::get_tests_passed))) || true
+ ((total_tests += $(state::get_tests_skipped))) || true
+ ((total_tests += $(state::get_tests_incomplete))) || true
+ ((total_tests += $(state::get_tests_snapshot))) || true
+ ((total_tests += $(state::get_tests_failed))) || true
+
+ local total_assertions=0
+ ((total_assertions += $(state::get_assertions_passed))) || true
+ ((total_assertions += $(state::get_assertions_skipped))) || true
+ ((total_assertions += $(state::get_assertions_incomplete))) || true
+ ((total_assertions += $(state::get_assertions_snapshot))) || true
+ ((total_assertions += $(state::get_assertions_failed))) || true
+
+ printf "%sTests: %s" "$_COLOR_FAINT" "$_COLOR_DEFAULT"
+ if [[ "$(state::get_tests_passed)" -gt 0 ]] || [[ "$(state::get_assertions_passed)" -gt 0 ]]; then
+ printf " %s%s passed%s," "$_COLOR_PASSED" "$(state::get_tests_passed)" "$_COLOR_DEFAULT"
+ fi
+ if [[ "$(state::get_tests_skipped)" -gt 0 ]] || [[ "$(state::get_assertions_skipped)" -gt 0 ]]; then
+ printf " %s%s skipped%s," "$_COLOR_SKIPPED" "$(state::get_tests_skipped)" "$_COLOR_DEFAULT"
+ fi
+ if [[ "$(state::get_tests_incomplete)" -gt 0 ]] || [[ "$(state::get_assertions_incomplete)" -gt 0 ]]; then
+ printf " %s%s incomplete%s," "$_COLOR_INCOMPLETE" "$(state::get_tests_incomplete)" "$_COLOR_DEFAULT"
+ fi
+ if [[ "$(state::get_tests_snapshot)" -gt 0 ]] || [[ "$(state::get_assertions_snapshot)" -gt 0 ]]; then
+ printf " %s%s snapshot%s," "$_COLOR_SNAPSHOT" "$(state::get_tests_snapshot)" "$_COLOR_DEFAULT"
+ fi
+ if [[ "$(state::get_tests_failed)" -gt 0 ]] || [[ "$(state::get_assertions_failed)" -gt 0 ]]; then
+ printf " %s%s failed%s," "$_COLOR_FAILED" "$(state::get_tests_failed)" "$_COLOR_DEFAULT"
+ fi
+ printf " %s total\n" "$total_tests"
+
+ printf "%sAssertions:%s" "$_COLOR_FAINT" "$_COLOR_DEFAULT"
+ if [[ "$(state::get_tests_passed)" -gt 0 ]] || [[ "$(state::get_assertions_passed)" -gt 0 ]]; then
+ printf " %s%s passed%s," "$_COLOR_PASSED" "$(state::get_assertions_passed)" "$_COLOR_DEFAULT"
+ fi
+ if [[ "$(state::get_tests_skipped)" -gt 0 ]] || [[ "$(state::get_assertions_skipped)" -gt 0 ]]; then
+ printf " %s%s skipped%s," "$_COLOR_SKIPPED" "$(state::get_assertions_skipped)" "$_COLOR_DEFAULT"
+ fi
+ if [[ "$(state::get_tests_incomplete)" -gt 0 ]] || [[ "$(state::get_assertions_incomplete)" -gt 0 ]]; then
+ printf " %s%s incomplete%s," "$_COLOR_INCOMPLETE" "$(state::get_assertions_incomplete)" "$_COLOR_DEFAULT"
+ fi
+ if [[ "$(state::get_tests_snapshot)" -gt 0 ]] || [[ "$(state::get_assertions_snapshot)" -gt 0 ]]; then
+ printf " %s%s snapshot%s," "$_COLOR_SNAPSHOT" "$(state::get_assertions_snapshot)" "$_COLOR_DEFAULT"
+ fi
+ if [[ "$(state::get_tests_failed)" -gt 0 ]] || [[ "$(state::get_assertions_failed)" -gt 0 ]]; then
+ printf " %s%s failed%s," "$_COLOR_FAILED" "$(state::get_assertions_failed)" "$_COLOR_DEFAULT"
+ fi
+ printf " %s total\n" "$total_assertions"
+
+ if [[ "$(state::get_tests_failed)" -gt 0 ]]; then
+ printf "\n%s%s%s\n" "$_COLOR_RETURN_ERROR" " Some tests failed " "$_COLOR_DEFAULT"
+ console_results::print_execution_time
+ return 1
+ fi
+
+ if [[ "$(state::get_tests_incomplete)" -gt 0 ]]; then
+ printf "\n%s%s%s\n" "$_COLOR_RETURN_INCOMPLETE" " Some tests incomplete " "$_COLOR_DEFAULT"
+ console_results::print_execution_time
+ return 0
+ fi
+
+ if [[ "$(state::get_tests_skipped)" -gt 0 ]]; then
+ printf "\n%s%s%s\n" "$_COLOR_RETURN_SKIPPED" " Some tests skipped " "$_COLOR_DEFAULT"
+ console_results::print_execution_time
+ return 0
+ fi
+
+ if [[ "$(state::get_tests_snapshot)" -gt 0 ]]; then
+ printf "\n%s%s%s\n" "$_COLOR_RETURN_SNAPSHOT" " Some snapshots created " "$_COLOR_DEFAULT"
+ console_results::print_execution_time
+ return 0
+ fi
+
+ if [[ $total_tests -eq 0 ]]; then
+ printf "\n%s%s%s\n" "$_COLOR_RETURN_ERROR" " No tests found " "$_COLOR_DEFAULT"
+ console_results::print_execution_time
+ return 1
+ fi
+
+ printf "\n%s%s%s\n" "$_COLOR_RETURN_SUCCESS" " All tests passed " "$_COLOR_DEFAULT"
+ console_results::print_execution_time
+ return 0
+}
+
+function console_results::print_execution_time() {
+ if ! env::is_show_execution_time_enabled; then
+ return
+ fi
+
+ local time=$(clock::total_runtime_in_milliseconds | awk '{printf "%.0f", $1}')
+
+ if [[ "$time" -lt 1000 ]]; then
+ printf "${_COLOR_BOLD}%s${_COLOR_DEFAULT}\n" \
+ "Time taken: $time ms"
+ return
+ fi
+
+ local time_in_seconds=$(( time / 1000 ))
+ local remainder_ms=$(( time % 1000 ))
+  # Zero-pad the milliseconds so that e.g. 1005 ms renders as "1.005 s", not "1.5 s"
+  local formatted_seconds
+  formatted_seconds=$(printf "%d.%03d" "$time_in_seconds" "$remainder_ms")
+
+ printf "${_COLOR_BOLD}%s${_COLOR_DEFAULT}\n" \
+ "Time taken: $formatted_seconds s"
+}
+
+function console_results::print_successful_test() {
+ local test_name=$1
+ shift
+ local duration=${1:-"0"}
+ shift
+
+ local line
+ if [[ -z "$*" ]]; then
+ line=$(printf "%s✓ Passed%s: %s" "$_COLOR_PASSED" "$_COLOR_DEFAULT" "$test_name")
+ else
+ line=$(printf "%s✓ Passed%s: %s (%s)" "$_COLOR_PASSED" "$_COLOR_DEFAULT" "$test_name" "$*")
+ fi
+
+ local full_line=$line
+ if env::is_show_execution_time_enabled; then
+ full_line="$(printf "%s\n" "$(str::rpad "$line" "$duration ms")")"
+ fi
+
+ state::print_line "successful" "$full_line"
+}
+
+function console_results::print_failure_message() {
+ local test_name=$1
+ local failure_message=$2
+
+ local line
+ line="$(printf "\
+${_COLOR_FAILED}✗ Failed${_COLOR_DEFAULT}: %s
+ ${_COLOR_FAINT}Message:${_COLOR_DEFAULT} ${_COLOR_BOLD}'%s'${_COLOR_DEFAULT}\n"\
+ "${test_name}" "${failure_message}")"
+
+ state::print_line "failure" "$line"
+}
+
+function console_results::print_failed_test() {
+ local function_name=$1
+ local expected=$2
+ local failure_condition_message=$3
+ local actual=$4
+ local extra_key=${5-}
+ local extra_value=${6-}
+
+ local line
+ line="$(printf "\
+${_COLOR_FAILED}✗ Failed${_COLOR_DEFAULT}: %s
+ ${_COLOR_FAINT}Expected${_COLOR_DEFAULT} ${_COLOR_BOLD}'%s'${_COLOR_DEFAULT}
+ ${_COLOR_FAINT}%s${_COLOR_DEFAULT} ${_COLOR_BOLD}'%s'${_COLOR_DEFAULT}\n" \
+ "${function_name}" "${expected}" "${failure_condition_message}" "${actual}")"
+
+ if [ -n "$extra_key" ]; then
+ line+="$(printf "\
+
+ ${_COLOR_FAINT}%s${_COLOR_DEFAULT} ${_COLOR_BOLD}'%s'${_COLOR_DEFAULT}\n" \
+ "${extra_key}" "${extra_value}")"
+ fi
+
+ state::print_line "failed" "$line"
+}
+
+function console_results::print_failed_snapshot_test() {
+ local function_name=$1
+ local snapshot_file=$2
+
+ local line
+ line="$(printf "${_COLOR_FAILED}✗ Failed${_COLOR_DEFAULT}: %s
+ ${_COLOR_FAINT}Expected to match the snapshot${_COLOR_DEFAULT}\n" "$function_name")"
+
+ if dependencies::has_git; then
+ local actual_file="${snapshot_file}.tmp"
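+    # Note: $actual is inherited from the calling assert_match_snapshot*
+    # function via bash dynamic scoping; it is not a parameter of this function.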
+ echo "$actual" > "$actual_file"
+
+ local git_diff_output
+ git_diff_output="$(git diff --no-index --word-diff --color=always \
+ "$snapshot_file" "$actual_file" 2>/dev/null \
+ | tail -n +6 | sed "s/^/ /")"
+
+ line+="$git_diff_output"
+ rm "$actual_file"
+ fi
+
+ state::print_line "failed_snapshot" "$line"
+}
+
+function console_results::print_skipped_test() {
+ local function_name=$1
+ local reason=${2-}
+
+ local line
+ line="$(printf "${_COLOR_SKIPPED}↷ Skipped${_COLOR_DEFAULT}: %s\n" "${function_name}")"
+
+ if [[ -n "$reason" ]]; then
+ line+="$(printf "${_COLOR_FAINT} %s${_COLOR_DEFAULT}\n" "${reason}")"
+ fi
+
+ state::print_line "skipped" "$line"
+}
+
+function console_results::print_incomplete_test() {
+ local function_name=$1
+ local pending=${2-}
+
+ local line
+ line="$(printf "${_COLOR_INCOMPLETE}✒ Incomplete${_COLOR_DEFAULT}: %s\n" "${function_name}")"
+
+ if [[ -n "$pending" ]]; then
+ line+="$(printf "${_COLOR_FAINT} %s${_COLOR_DEFAULT}\n" "${pending}")"
+ fi
+
+ state::print_line "incomplete" "$line"
+}
+
+function console_results::print_snapshot_test() {
+ local function_name=$1
+ local test_name
+ test_name=$(helper::normalize_test_function_name "$function_name")
+
+ local line
+ line="$(printf "${_COLOR_SNAPSHOT}✎ Snapshot${_COLOR_DEFAULT}: %s\n" "${test_name}")"
+
+ state::print_line "snapshot" "$line"
+}
+
+function console_results::print_error_test() {
+ local function_name=$1
+ local error="$2"
+
+ local test_name
+ test_name=$(helper::normalize_test_function_name "$function_name")
+
+ local line
+ line="$(printf "${_COLOR_FAILED}✗ Error${_COLOR_DEFAULT}: %s
+ ${_COLOR_FAINT}%s${_COLOR_DEFAULT}\n" "${test_name}" "${error}")"
+
+ state::print_line "error" "$line"
+}
+
+function console_results::print_failing_tests_and_reset() {
+ if [[ -s "$FAILURES_OUTPUT_PATH" ]]; then
+ local total_failed
+ total_failed=$(state::get_tests_failed)
+
+ if env::is_simple_output_enabled; then
+ printf "\n\n"
+ fi
+
+ if [[ "$total_failed" -eq 1 ]]; then
+ echo -e "${_COLOR_BOLD}There was 1 failure:${_COLOR_DEFAULT}\n"
+ else
+ echo -e "${_COLOR_BOLD}There were $total_failed failures:${_COLOR_DEFAULT}\n"
+ fi
+
+ sed '${/^$/d;}' "$FAILURES_OUTPUT_PATH" | sed 's/^/|/'
+ rm "$FAILURES_OUTPUT_PATH"
+
+ echo ""
+ fi
+}
+
+# helpers.sh
+
+declare -r BASHUNIT_GIT_REPO="https://github.com/TypedDevs/bashunit"
+
+#
+# @param $1 string Eg: "test_some_logic_camelCase"
+#
+# @return string Eg: "Some logic camelCase"
+#
+function helper::normalize_test_function_name() {
+ local original_fn_name="${1-}"
+ local interpolated_fn_name="${2-}"
+
+ if [[ -n "${interpolated_fn_name-}" ]]; then
+ original_fn_name="$interpolated_fn_name"
+ fi
+
+ local result
+
+ # Remove the first "test_" prefix, if present
+ result="${original_fn_name#test_}"
+ # If no "test_" was removed (e.g., "testFoo"), remove the "test" prefix
+ if [[ "$result" == "$original_fn_name" ]]; then
+ result="${original_fn_name#test}"
+ fi
+ # Replace underscores with spaces
+ result="${result//_/ }"
+ # Capitalize the first letter
+ result="$(tr '[:lower:]' '[:upper:]' <<< "${result:0:1}")${result:1}"
+
+ echo "$result"
+}
+
+function helper::escape_single_quotes() {
+ local value="$1"
+ # shellcheck disable=SC1003
+ echo "${value//\'/'\'\\''\'}"
+}
+
+function helper::interpolate_function_name() {
+ local function_name="$1"
+ shift
+ local args=("$@")
+ local result="$function_name"
+
+ for ((i=0; i<${#args[@]}; i++)); do
+ local placeholder="::$((i+1))::"
+ # shellcheck disable=SC2155
+ local value="$(helper::escape_single_quotes "${args[$i]}")"
+ value="'$value'"
+ result="${result//${placeholder}/${value}}"
+ done
+
+ echo "$result"
+}
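+
+# Example (illustrative): placeholders ::1::, ::2::, ... are replaced with
+# the single-quoted arguments.
+#   helper::interpolate_function_name "test_sum_::1::_plus_::2::" 1 2
+#   # => test_sum_'1'_plus_'2'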
+
+function helper::check_duplicate_functions() {
+ local script="$1"
+
+ local filtered_lines
+ filtered_lines=$(grep -E '^[[:space:]]*(function[[:space:]]+)?test[a-zA-Z_][a-zA-Z0-9_]*\s*\(\)\s*\{' "$script")
+
+ local function_names
+ function_names=$(echo "$filtered_lines" | awk '{
+ for (i=1; i<=NF; i++) {
+ if ($i ~ /^test[a-zA-Z_][a-zA-Z0-9_]*\(\)$/) {
+ gsub(/\(\)/, "", $i)
+ print $i
+ break
+ }
+ }
+ }')
+
+ local duplicates
+ duplicates=$(echo "$function_names" | sort | uniq -d)
+ if [ -n "$duplicates" ]; then
+ state::set_duplicated_functions_merged "$script" "$duplicates"
+ return 1
+ fi
+ return 0
+}
+
+#
+# @param $1 string Eg: "prefix"
+# @param $2 string Eg: "filter"
+# @param $3 array Eg: "[fn1, fn2, prefix_filter_fn3, fn4, ...]"
+#
+# @return array Eg: "[prefix_filter_fn3, ...]" The filtered functions with prefix
+#
+function helper::get_functions_to_run() {
+ local prefix=$1
+ local filter=${2/test_/}
+ local function_names=$3
+
+ local filtered_functions=""
+
+ for fn in $function_names; do
+ if [[ $fn == ${prefix}_*${filter}* ]]; then
+ if [[ $filtered_functions == *" $fn"* ]]; then
+ return 1
+ fi
+ filtered_functions+=" $fn"
+ fi
+ done
+
+ echo "${filtered_functions# }"
+}
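+
+# Example (illustrative): keep only the prefixed functions that match a
+# --filter value.
+#   helper::get_functions_to_run "test" "login" "test_login_ok test_logout"
+#   # => test_login_ok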
+
+#
+# @param $1 string Eg: "do_something"
+#
+function helper::execute_function_if_exists() {
+ if [[ "$(type -t "$1")" == "function" ]]; then
+ "$1" 2>/dev/null
+ fi
+}
+
+#
+# @param $1 string Eg: "do_something"
+#
+function helper::unset_if_exists() {
+ unset "$1" 2>/dev/null
+}
+
+function helper::find_files_recursive() {
+ ## Remove trailing slash using parameter expansion
+ local path="${1%%/}"
+
+ if [[ "$path" == *"*"* ]]; then
+ eval find "$path" -type f -name '*[tT]est.sh' | sort -u
+ elif [[ -d "$path" ]]; then
+ find "$path" -type f -name '*[tT]est.sh' | sort -u
+ else
+ echo "$path"
+ fi
+}
+
+function helper::normalize_variable_name() {
+ local input_string="$1"
+ local normalized_string
+
+ normalized_string="${input_string//[^a-zA-Z0-9_]/_}"
+
+ if [[ ! $normalized_string =~ ^[a-zA-Z_] ]]; then
+ normalized_string="_$normalized_string"
+ fi
+
+ echo "$normalized_string"
+}
+
+function helper::get_provider_data() {
+ local function_name="$1"
+ local script="$2"
+
+ if [[ ! -f "$script" ]]; then
+ return
+ fi
+
+ local data_provider_function
+ data_provider_function=$(
+ # shellcheck disable=SC1087
+ grep -B 2 -E "function[[:space:]]+$function_name[[:space:]]*\(\)" "$script" 2>/dev/null | \
+ grep -E "^[[:space:]]*# *@?data_provider[[:space:]]+" | \
+ sed -E 's/^[[:space:]]*# *@?data_provider[[:space:]]+//' || true
+ )
+
+ if [[ -n "$data_provider_function" ]]; then
+ helper::execute_function_if_exists "$data_provider_function"
+ fi
+}
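+
+# Example (illustrative): a provider is referenced in a comment within the
+# two lines above a test function; each line of its output parameterizes
+# the test.
+#   function provide_browsers() { echo "firefox"; echo "chrome"; }
+#
+#   # @data_provider provide_browsers
+#   function test_opens_browser() { assert_not_empty "$1"; }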
+
+function helper::trim() {
+ local input_string="$1"
+ local trimmed_string
+
+ trimmed_string="${input_string#"${input_string%%[![:space:]]*}"}"
+ trimmed_string="${trimmed_string%"${trimmed_string##*[![:space:]]}"}"
+
+ echo "$trimmed_string"
+}
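+
+# Example (illustrative):
+#   helper::trim "   spaced out   "   # => "spaced out"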
+
+function helpers::get_latest_tag() {
+ git ls-remote --tags "$BASHUNIT_GIT_REPO" |
+ awk '{print $2}' |
+ sed 's|^refs/tags/||' |
+ sort -Vr |
+ head -n 1
+}
+
+function helpers::find_total_tests() {
+ local filter=${1:-}
+ local files=("${@:2}")
+
+ if [[ ${#files[@]} -eq 0 ]]; then
+ echo 0
+ return
+ fi
+
+ local pattern='^\s*function\s+test'
+ if [[ -n "$filter" ]]; then
+ pattern+=".*$filter"
+ fi
+
+ grep -r -E "$pattern" --include="*[tT]est.sh" "${files[@]}" 2>/dev/null | wc -l | xargs
+}
+
+function helper::load_test_files() {
+ local filter=$1
+ local files=("${@:2}")
+
+ local test_files=()
+
+ if [[ "${#files[@]}" -eq 0 ]]; then
+ if [[ -n "${BASHUNIT_DEFAULT_PATH}" ]]; then
+ while IFS='' read -r line; do
+ test_files+=("$line")
+ done < <(helper::find_files_recursive "$BASHUNIT_DEFAULT_PATH")
+ fi
+ else
+ test_files=("${files[@]}")
+ fi
+
+ printf "%s\n" "${test_files[@]}"
+}
+
+# upgrade.sh
+
+function upgrade::upgrade() {
+ local script_path
+ script_path="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+ local latest_tag
+ latest_tag="$(helpers::get_latest_tag)"
+
+ if [[ "$BASHUNIT_VERSION" == "$latest_tag" ]]; then
+ echo "> You are already on latest version"
+ return
+ fi
+
+ echo "> Upgrading bashunit to latest version"
+ cd "$script_path" || exit
+
+  if ! io::download_to "https://github.com/TypedDevs/bashunit/releases/download/$latest_tag/bashunit" "bashunit"; then
+    echo "Failed to download bashunit"
+    return 1
+  fi
+
+  chmod u+x "bashunit"
+
+  echo "> bashunit upgraded successfully to version $latest_tag"
+}
+
+# assertions.sh
+
+
+# assert.sh
+
+function fail() {
+ local message="${1:-${FUNCNAME[1]}}"
+
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failure_message "${label}" "$message"
+}
+
+function assert_true() {
+ local actual="$1"
+
+ # Check for expected literal values first
+ case "$actual" in
+ "true"|"0") state::add_assertions_passed; return ;;
+ "false"|"1") handle_bool_assertion_failure "true or 0" "$actual"; return ;;
+ esac
+
+ # Run command or eval and check the exit code
+ run_command_or_eval "$actual"
+ local exit_code=$?
+
+ if [[ $exit_code -ne 0 ]]; then
+ handle_bool_assertion_failure "command or function with zero exit code" "exit code: $exit_code"
+ else
+ state::add_assertions_passed
+ fi
+}
+
+function assert_false() {
+ local actual="$1"
+
+ # Check for expected literal values first
+ case "$actual" in
+ "false"|"1") state::add_assertions_passed; return ;;
+ "true"|"0") handle_bool_assertion_failure "false or 1" "$actual"; return ;;
+ esac
+
+ # Run command or eval and check the exit code
+ run_command_or_eval "$actual"
+ local exit_code=$?
+
+ if [[ $exit_code -eq 0 ]]; then
+ handle_bool_assertion_failure "command or function with non-zero exit code" "exit code: $exit_code"
+ else
+ state::add_assertions_passed
+ fi
+}
+
+function run_command_or_eval() {
+ local cmd="$1"
+
+ if [[ "$cmd" =~ ^eval ]]; then
+ eval "${cmd#eval }" &> /dev/null
+ elif [[ "$(command -v "$cmd")" =~ ^alias ]]; then
+ eval "$cmd" &> /dev/null
+ else
+ "$cmd" &> /dev/null
+ fi
+ return $?
+}
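+
+# Examples (illustrative): assert_true accepts literals, a bare command or
+# function name (executed as a single word), or an "eval "-prefixed string
+# for full command lines with arguments.
+#   assert_true "0"                  # literal truthy value
+#   assert_true "my_function"        # passes if my_function exits 0 (hypothetical fn)
+#   assert_true "eval [ -d /tmp ]"   # eval-prefixed compound command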
+
+function handle_bool_assertion_failure() {
+ local expected="$1"
+ local got="$2"
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[2]}")"
+
+ state::add_assertions_failed
+ console_results::print_failed_test "$label" "$expected" "but got " "$got"
+}
+
+function assert_same() {
+ local expected="$1"
+ local actual="$2"
+
+ if [[ "$expected" != "$actual" ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${expected}" "but got " "${actual}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_equals() {
+ local expected="$1"
+ local actual="$2"
+
+ # Remove ANSI escape sequences (color codes)
+ local actual_cleaned
+ actual_cleaned=$(echo -e "$actual" | sed -r "s/\x1B\[[0-9;]*[mK]//g")
+ local expected_cleaned
+ expected_cleaned=$(echo -e "$expected" | sed -r "s/\x1B\[[0-9;]*[mK]//g")
+
+ # Remove all control characters and whitespace (optional, depending on your needs)
+ actual_cleaned=$(echo "$actual_cleaned" | tr -d '[:cntrl:]')
+ expected_cleaned=$(echo "$expected_cleaned" | tr -d '[:cntrl:]')
+
+ if [[ "$expected_cleaned" != "$actual_cleaned" ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${expected_cleaned}" "but got " "${actual_cleaned}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_not_equals() {
+ local expected="$1"
+ local actual="$2"
+
+ # Remove ANSI escape sequences (color codes)
+ local actual_cleaned
+ actual_cleaned=$(echo -e "$actual" | sed -r "s/\x1B\[[0-9;]*[mK]//g")
+ local expected_cleaned
+ expected_cleaned=$(echo -e "$expected" | sed -r "s/\x1B\[[0-9;]*[mK]//g")
+
+ # Remove all control characters and whitespace (optional, depending on your needs)
+ actual_cleaned=$(echo "$actual_cleaned" | tr -d '[:cntrl:]')
+ expected_cleaned=$(echo "$expected_cleaned" | tr -d '[:cntrl:]')
+
+ if [[ "$expected_cleaned" == "$actual_cleaned" ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${expected_cleaned}" "but got " "${actual_cleaned}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_empty() {
+ local expected="$1"
+
+ if [[ "$expected" != "" ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "to be empty" "but got " "${expected}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_not_empty() {
+ local expected="$1"
+
+ if [[ "$expected" == "" ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "to not be empty" "but got " "${expected}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_not_same() {
+ local expected="$1"
+ local actual="$2"
+
+ if [[ "$expected" == "$actual" ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${expected}" "but got " "${actual}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_contains() {
+ local expected="$1"
+ local actual_arr=("${@:2}")
+ local actual
+ actual=$(printf '%s\n' "${actual_arr[@]}")
+
+ if ! [[ $actual == *"$expected"* ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual}" "to contain" "${expected}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_contains_ignore_case() {
+ local expected="$1"
+ local actual="$2"
+
+ shopt -s nocasematch
+
+ if ! [[ $actual =~ $expected ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual}" "to contain" "${expected}"
+ shopt -u nocasematch
+ return
+ fi
+
+ shopt -u nocasematch
+ state::add_assertions_passed
+}
+
+function assert_not_contains() {
+ local expected="$1"
+ local actual_arr=("${@:2}")
+ local actual
+ actual=$(printf '%s\n' "${actual_arr[@]}")
+
+ if [[ $actual == *"$expected"* ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual}" "to not contain" "${expected}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_matches() {
+ local expected="$1"
+ local actual_arr=("${@:2}")
+ local actual
+ actual=$(printf '%s\n' "${actual_arr[@]}")
+
+ if ! [[ $actual =~ $expected ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual}" "to match" "${expected}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_not_matches() {
+ local expected="$1"
+ local actual_arr=("${@:2}")
+ local actual
+ actual=$(printf '%s\n' "${actual_arr[@]}")
+
+ if [[ $actual =~ $expected ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual}" "to not match" "${expected}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_exit_code() {
+ local actual_exit_code=${3-"$?"}
+ local expected_exit_code="$1"
+
+ if [[ "$actual_exit_code" -ne "$expected_exit_code" ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual_exit_code}" "to be" "${expected_exit_code}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_successful_code() {
+ local actual_exit_code=${3-"$?"}
+ local expected_exit_code=0
+
+ if [[ "$actual_exit_code" -ne "$expected_exit_code" ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual_exit_code}" "to be exactly" "${expected_exit_code}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_general_error() {
+ local actual_exit_code=${3-"$?"}
+ local expected_exit_code=1
+
+ if [[ "$actual_exit_code" -ne "$expected_exit_code" ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual_exit_code}" "to be exactly" "${expected_exit_code}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_command_not_found() {
+ local actual_exit_code=${3-"$?"}
+ local expected_exit_code=127
+
+ if [[ $actual_exit_code -ne "$expected_exit_code" ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual_exit_code}" "to be exactly" "${expected_exit_code}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_string_starts_with() {
+ local expected="$1"
+ local actual_arr=("${@:2}")
+ local actual
+ actual=$(printf '%s\n' "${actual_arr[@]}")
+
+ if ! [[ $actual =~ ^"$expected"* ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual}" "to start with" "${expected}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_string_not_starts_with() {
+ local expected="$1"
+ local actual="$2"
+
+ if [[ $actual =~ ^"$expected"* ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual}" "to not start with" "${expected}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_string_ends_with() {
+ local expected="$1"
+ local actual_arr=("${@:2}")
+ local actual
+ actual=$(printf '%s\n' "${actual_arr[@]}")
+
+ if ! [[ $actual =~ .*"$expected"$ ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual}" "to end with" "${expected}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_string_not_ends_with() {
+ local expected="$1"
+ local actual_arr=("${@:2}")
+ local actual
+ actual=$(printf '%s\n' "${actual_arr[@]}")
+
+ if [[ $actual =~ .*"$expected"$ ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual}" "to not end with" "${expected}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_less_than() {
+ local expected="$1"
+ local actual="$2"
+
+ if ! [[ "$actual" -lt "$expected" ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual}" "to be less than" "${expected}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_less_or_equal_than() {
+ local expected="$1"
+ local actual="$2"
+
+ if ! [[ "$actual" -le "$expected" ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual}" "to be less or equal than" "${expected}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_greater_than() {
+ local expected="$1"
+ local actual="$2"
+
+ if ! [[ "$actual" -gt "$expected" ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual}" "to be greater than" "${expected}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_greater_or_equal_than() {
+ local expected="$1"
+ local actual="$2"
+
+ if ! [[ "$actual" -ge "$expected" ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual}" "to be greater or equal than" "${expected}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_line_count() {
+ local expected="$1"
+ local input_arr=("${@:2}")
+ local input_str
+ input_str=$(printf '%s\n' "${input_arr[@]}")
+
+ if [ -z "$input_str" ]; then
+ local actual=0
+ else
+ local actual
+ actual=$(echo "$input_str" | wc -l | tr -d '[:blank:]')
+    local additional_new_lines
+    additional_new_lines=$(grep -o '\\n' <<< "$input_str" | wc -l | tr -d '[:blank:]')
+ ((actual+=additional_new_lines))
+ fi
+
+ if [[ "$expected" != "$actual" ]]; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${input_str}"\
+ "to contain number of lines equal to" "${expected}"\
+ "but found" "${actual}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
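+
+# Example (illustrative):
+#   assert_line_count 3 "$(printf 'a\nb\nc')"   # passes: three lines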
+
+# assert_arrays.sh
+
+function assert_array_contains() {
+ local expected="$1"
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ shift
+
+ local actual=("${@}")
+
+ if ! [[ "${actual[*]}" == *"$expected"* ]]; then
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual[*]}" "to contain" "${expected}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_array_not_contains() {
+ local expected="$1"
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ shift
+ local actual=("$@")
+
+ if [[ "${actual[*]}" == *"$expected"* ]]; then
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${actual[*]}" "to not contain" "${expected}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+# assert_files.sh
+
+function assert_file_exists() {
+ local expected="$1"
+ local label="${3:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+
+ if [[ ! -f "$expected" ]]; then
+ state::add_assertions_failed
+    console_results::print_failed_test "${label}" "${expected}" "to exist but" "does not exist"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_file_not_exists() {
+ local expected="$1"
+ local label="${3:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+
+ if [[ -f "$expected" ]]; then
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${expected}" "to not exist but" "the file exists"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_is_file() {
+ local expected="$1"
+ local label="${3:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+
+ if [[ ! -f "$expected" ]]; then
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${expected}" "to be a file" "but is not a file"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_is_file_empty() {
+ local expected="$1"
+ local label="${3:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+
+ if [[ -s "$expected" ]]; then
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${expected}" "to be empty" "but is not empty"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_files_equals() {
+ local expected="$1"
+ local actual="$2"
+
+ if [[ "$(diff -u "$expected" "$actual")" != '' ]] ; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+
+ console_results::print_failed_test "${label}" "${expected}" "Compared" "${actual}" \
+ "Diff" "$(diff -u "$expected" "$actual" | sed '1,2d')"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_files_not_equals() {
+ local expected="$1"
+ local actual="$2"
+
+ if [[ "$(diff -u "$expected" "$actual")" == '' ]] ; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+
+ console_results::print_failed_test "${label}" "${expected}" "Compared" "${actual}" \
+ "Diff" "Files are equals"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_file_contains() {
+ local file="$1"
+ local string="$2"
+
+ if ! grep -F -q "$string" "$file"; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+
+ console_results::print_failed_test "${label}" "${file}" "to contain" "${string}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_file_not_contains() {
+ local file="$1"
+ local string="$2"
+
+ if grep -q "$string" "$file"; then
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+
+ console_results::print_failed_test "${label}" "${file}" "to not contain" "${string}"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+# assert_folders.sh
+
+function assert_directory_exists() {
+ local expected="$1"
+ local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+
+ if [[ ! -d "$expected" ]]; then
+ state::add_assertions_failed
+    console_results::print_failed_test "${label}" "${expected}" "to exist but" "does not exist"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_directory_not_exists() {
+ local expected="$1"
+ local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+
+ if [[ -d "$expected" ]]; then
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${expected}" "to not exist but" "the directory exists"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_is_directory() {
+ local expected="$1"
+ local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+
+ if [[ ! -d "$expected" ]]; then
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${expected}" "to be a directory" "but is not a directory"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_is_directory_empty() {
+ local expected="$1"
+ local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+
+ if [[ ! -d "$expected" || -n "$(ls -A "$expected")" ]]; then
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${expected}" "to be empty" "but is not empty"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_is_directory_not_empty() {
+ local expected="$1"
+ local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+
+ if [[ ! -d "$expected" || -z "$(ls -A "$expected")" ]]; then
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${expected}" "to not be empty" "but is empty"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_is_directory_readable() {
+ local expected="$1"
+ local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+
+ if [[ ! -d "$expected" || ! -r "$expected" || ! -x "$expected" ]]; then
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${expected}" "to be readable" "but is not readable"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_is_directory_not_readable() {
+ local expected="$1"
+ local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+
+ if [[ ! -d "$expected" ]] || [[ -r "$expected" && -x "$expected" ]]; then
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${expected}" "to be not readable" "but is readable"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_is_directory_writable() {
+ local expected="$1"
+ local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+
+ if [[ ! -d "$expected" || ! -w "$expected" ]]; then
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${expected}" "to be writable" "but is not writable"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_is_directory_not_writable() {
+ local expected="$1"
+ local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+
+ if [[ ! -d "$expected" || -w "$expected" ]]; then
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${expected}" "to be not writable" "but is writable"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+# assert_snapshot.sh
+
+function assert_match_snapshot() {
+ local actual
+ actual=$(echo -n "$1" | tr -d '\r')
+ local directory
+ directory="./$(dirname "${BASH_SOURCE[1]}")/snapshots"
+ local test_file
+ test_file="$(helper::normalize_variable_name "$(basename "${BASH_SOURCE[1]}")")"
+ local snapshot_name
+ snapshot_name="$(helper::normalize_variable_name "${FUNCNAME[1]}").snapshot"
+ local snapshot_file
+ snapshot_file="${directory}/${test_file}.${snapshot_name}"
+
+ if [[ ! -f "$snapshot_file" ]]; then
+ mkdir -p "$directory"
+ echo "$actual" > "$snapshot_file"
+
+ state::add_assertions_snapshot
+ return
+ fi
+
+ local snapshot
+ snapshot=$(tr -d '\r' < "$snapshot_file")
+
+ if [[ "$actual" != "$snapshot" ]]; then
+ local label
+ label=$(helper::normalize_test_function_name "${FUNCNAME[1]}")
+
+ state::add_assertions_failed
+ console_results::print_failed_snapshot_test "$label" "$snapshot_file"
+
+ return
+ fi
+
+ state::add_assertions_passed
+}
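+
+# Example (illustrative): a first run of test_renders_help in example_test.sh
+# writes snapshots/example_test_sh.test_renders_help.snapshot next to the test
+# file; subsequent runs compare against it and fail on any difference.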
+
+function assert_match_snapshot_ignore_colors() {
+ local actual
+ actual=$(echo -n "$1" | sed -r 's/\x1B\[[0-9;]*[mK]//g' | tr -d '\r')
+
+ local directory
+ directory="./$(dirname "${BASH_SOURCE[1]}")/snapshots"
+ local test_file
+ test_file="$(helper::normalize_variable_name "$(basename "${BASH_SOURCE[1]}")")"
+ local snapshot_name
+ snapshot_name="$(helper::normalize_variable_name "${FUNCNAME[1]}").snapshot"
+ local snapshot_file
+ snapshot_file="${directory}/${test_file}.${snapshot_name}"
+
+ if [[ ! -f "$snapshot_file" ]]; then
+ mkdir -p "$directory"
+ echo "$actual" > "$snapshot_file"
+
+ state::add_assertions_snapshot
+ return
+ fi
+
+ local snapshot
+ snapshot=$(tr -d '\r' < "$snapshot_file")
+
+ if [[ "$actual" != "$snapshot" ]]; then
+ local label
+ label=$(helper::normalize_test_function_name "${FUNCNAME[1]}")
+
+ state::add_assertions_failed
+ console_results::print_failed_snapshot_test "$label" "$snapshot_file"
+
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+# skip_todo.sh
+
+function skip() {
+ local reason=${1-}
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+
+ console_results::print_skipped_test "${label}" "${reason}"
+
+ state::add_assertions_skipped
+}
+
+function todo() {
+ local pending=${1-}
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+
+ console_results::print_incomplete_test "${label}" "${pending}"
+
+ state::add_assertions_incomplete
+}
+
+# test_doubles.sh
+
+declare -a MOCKED_FUNCTIONS=()
+
+function unmock() {
+ local command=$1
+
+ for i in "${!MOCKED_FUNCTIONS[@]}"; do
+ if [[ "${MOCKED_FUNCTIONS[$i]}" == "$command" ]]; then
+ unset "MOCKED_FUNCTIONS[$i]"
+ unset -f "$command"
+ local variable
+ variable="$(helper::normalize_variable_name "$command")"
+ local times_file_var="${variable}_times_file"
+ local params_file_var="${variable}_params_file"
+ [[ -f "${!times_file_var-}" ]] && rm -f "${!times_file_var}"
+ [[ -f "${!params_file_var-}" ]] && rm -f "${!params_file_var}"
+ unset "$times_file_var"
+ unset "$params_file_var"
+ break
+ fi
+ done
+}
+
+function mock() {
+ local command=$1
+ shift
+
+ if [[ $# -gt 0 ]]; then
+ eval "function $command() { $* ; }"
+ else
+ eval "function $command() { echo \"$($CAT)\" ; }"
+ fi
+
+ export -f "${command?}"
+
+ MOCKED_FUNCTIONS+=("$command")
+}
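+# Illustrative usage (comments only): a mock replaces the named command for
+# the current test and is exported to subshells. With a body, the body runs;
+# with no body, whatever is on stdin becomes the canned output:
+#
+#   mock date echo "2024-01-01"
+#   mock ps <<EOF
+#   PID TTY TIME CMD
+#   EOF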
+
+function spy() {
+ local command=$1
+ local variable
+ variable="$(helper::normalize_variable_name "$command")"
+
+ local times_file params_file
+ local test_id="${BASHUNIT_CURRENT_TEST_ID:-global}"
+ times_file=$(temp_file "${test_id}_${variable}_times")
+ params_file=$(temp_file "${test_id}_${variable}_params")
+ echo 0 > "$times_file"
+ : > "$params_file"
+ export "${variable}_times_file"="$times_file"
+ export "${variable}_params_file"="$params_file"
+
+ eval "function $command() {
+ echo \"\$*\" >> '$params_file'
+ local _c=\$(cat '$times_file')
+ _c=\$((_c+1))
+ echo \"\$_c\" > '$times_file'
+ }"
+
+ export -f "${command?}"
+
+ MOCKED_FUNCTIONS+=("$command")
+}
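+# Illustrative usage (comments only): a spy records how often, and with which
+# arguments, a command was invoked, feeding the assert_have_been_called*
+# helpers below:
+#
+#   spy curl
+#   curl --silent https://example.com
+#   assert_have_been_called curl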
+
+function assert_have_been_called() {
+ local command=$1
+ local variable
+ variable="$(helper::normalize_variable_name "$command")"
+ local file_var="${variable}_times_file"
+ local times=0
+ if [[ -f "${!file_var-}" ]]; then
+ times=$(cat "${!file_var}")
+ fi
+ local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+
+ if [[ $times -eq 0 ]]; then
+ state::add_assertions_failed
+    console_results::print_failed_test "${label}" "${command}" "to have been called" "at least once"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_have_been_called_with() {
+ local expected=$1
+ local command=$2
+ local third_arg="${3:-}"
+ local fourth_arg="${4:-}"
+
+ local index=""
+ local label=""
+ if [[ -n $third_arg && $third_arg =~ ^[0-9]+$ ]]; then
+ index=$third_arg
+ label="${fourth_arg:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+ else
+ label="${third_arg:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+ index="$fourth_arg"
+ fi
+
+ local variable
+ variable="$(helper::normalize_variable_name "$command")"
+ local file_var="${variable}_params_file"
+ local params=""
+ if [[ -f "${!file_var-}" ]]; then
+ if [[ -n $index ]]; then
+ params=$(sed -n "${index}p" "${!file_var}")
+ else
+ params=$(tail -n 1 "${!file_var}")
+ fi
+ fi
+
+ if [[ "$expected" != "$params" ]]; then
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${expected}" "but got " "$params"
+ return
+ fi
+
+ state::add_assertions_passed
+}
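+# Illustrative usage (comments only): the optional numeric argument selects
+# the n-th recorded call instead of the most recent one:
+#
+#   spy logger
+#   logger "first message"
+#   logger "second message"
+#   assert_have_been_called_with "first message" logger 1
+#   assert_have_been_called_with "second message" logger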
+
+function assert_have_been_called_times() {
+ local expected=$1
+ local command=$2
+ local variable
+ variable="$(helper::normalize_variable_name "$command")"
+ local file_var="${variable}_times_file"
+ local times=0
+ if [[ -f "${!file_var-}" ]]; then
+ times=$(cat "${!file_var}")
+ fi
+ local label="${3:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+ if [[ $times -ne $expected ]]; then
+ state::add_assertions_failed
+    console_results::print_failed_test "${label}" "${command}" \
+      "to have been called" "${expected} times" \
+      "actual" "${times} times"
+ return
+ fi
+
+ state::add_assertions_passed
+}
+
+function assert_not_called() {
+ local command=$1
+ local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
+ assert_have_been_called_times 0 "$command" "$label"
+}
+
+# reports.sh
+# shellcheck disable=SC2155
+
+_REPORTS_TEST_FILES=()
+_REPORTS_TEST_NAMES=()
+_REPORTS_TEST_STATUSES=()
+_REPORTS_TEST_DURATIONS=()
+_REPORTS_TEST_ASSERTIONS=()
+
+function reports::add_test_snapshot() {
+ reports::add_test "$1" "$2" "$3" "$4" "snapshot"
+}
+
+function reports::add_test_incomplete() {
+ reports::add_test "$1" "$2" "$3" "$4" "incomplete"
+}
+
+function reports::add_test_skipped() {
+ reports::add_test "$1" "$2" "$3" "$4" "skipped"
+}
+
+function reports::add_test_passed() {
+ reports::add_test "$1" "$2" "$3" "$4" "passed"
+}
+
+function reports::add_test_failed() {
+ reports::add_test "$1" "$2" "$3" "$4" "failed"
+}
+
+function reports::add_test() {
+ local file="$1"
+ local test_name="$2"
+ local duration="$3"
+ local assertions="$4"
+ local status="$5"
+
+ _REPORTS_TEST_FILES+=("$file")
+ _REPORTS_TEST_NAMES+=("$test_name")
+ _REPORTS_TEST_STATUSES+=("$status")
+ _REPORTS_TEST_ASSERTIONS+=("$assertions")
+ _REPORTS_TEST_DURATIONS+=("$duration")
+}
+
+function reports::generate_junit_xml() {
+ local output_file="$1"
+
+ local test_passed=$(state::get_tests_passed)
+ local tests_skipped=$(state::get_tests_skipped)
+ local tests_incomplete=$(state::get_tests_incomplete)
+ local tests_snapshot=$(state::get_tests_snapshot)
+ local tests_failed=$(state::get_tests_failed)
+ local time=$(clock::total_runtime_in_milliseconds)
+
+ {
+ echo ""
+ echo ""
+ echo " "
+
+ for i in "${!_REPORTS_TEST_NAMES[@]}"; do
+ local file="${_REPORTS_TEST_FILES[$i]}"
+ local name="${_REPORTS_TEST_NAMES[$i]}"
+ local assertions="${_REPORTS_TEST_ASSERTIONS[$i]}"
+ local status="${_REPORTS_TEST_STATUSES[$i]}"
+ local test_time="${_REPORTS_TEST_DURATIONS[$i]}"
+
+ echo " "
+ echo " "
+ done
+
+ echo " "
+ echo ""
+ } > "$output_file"
+}
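+# Illustrative invocation (comment only): the runner calls this when
+# BASHUNIT_LOG_JUNIT is set (see `-l|--log-junit` below), but it can also be
+# called directly:
+#
+#   reports::generate_junit_xml "junit.xml"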
+
+function reports::generate_report_html() {
+ local output_file="$1"
+
+ local test_passed=$(state::get_tests_passed)
+ local tests_skipped=$(state::get_tests_skipped)
+ local tests_incomplete=$(state::get_tests_incomplete)
+ local tests_snapshot=$(state::get_tests_snapshot)
+ local tests_failed=$(state::get_tests_failed)
+ local time=$(clock::total_runtime_in_milliseconds)
+
+ # Temporary file to store test cases by file
+ local temp_file="temp_test_cases.txt"
+
+ # Collect test cases by file
+ : > "$temp_file" # Clear temp file if it exists
+ for i in "${!_REPORTS_TEST_NAMES[@]}"; do
+ local file="${_REPORTS_TEST_FILES[$i]}"
+ local name="${_REPORTS_TEST_NAMES[$i]}"
+ local status="${_REPORTS_TEST_STATUSES[$i]}"
+ local test_time="${_REPORTS_TEST_DURATIONS[$i]}"
+ local test_case="$file|$name|$status|$test_time"
+
+ echo "$test_case" >> "$temp_file"
+ done
+
+ {
+ echo ""
+ echo ""
+ echo ""
+ echo " "
+ echo " "
+ echo " Test Report"
+ echo " "
+ echo ""
+ echo ""
+ echo " Test Report
"
+ echo " "
+ echo " "
+ echo " "
+ echo " Total Tests | "
+ echo " Passed | "
+ echo " Failed | "
+ echo " Incomplete | "
+ echo " Skipped | "
+ echo " Snapshot | "
+ echo " Time (ms) | "
+ echo "
"
+ echo " "
+ echo " "
+ echo " "
+ echo " ${#_REPORTS_TEST_NAMES[@]} | "
+ echo " $test_passed | "
+ echo " $tests_failed | "
+ echo " $tests_incomplete | "
+ echo " $tests_skipped | "
+ echo " $tests_snapshot | "
+ echo " $time | "
+ echo "
"
+ echo " "
+ echo "
"
+ echo " Time: $time ms
"
+
+ # Read the temporary file and group by file
+ local current_file=""
+ while IFS='|' read -r file name status test_time; do
+ if [ "$file" != "$current_file" ]; then
+ if [ -n "$current_file" ]; then
+ echo " "
+ echo " "
+ fi
+ echo " File: $file
"
+ echo " "
+ echo " "
+ echo " "
+ echo " Test Name | "
+ echo " Status | "
+ echo " Time (ms) | "
+ echo "
"
+ echo " "
+ echo " "
+ current_file="$file"
+ fi
+ echo " "
+ echo " $name | "
+ echo " $status | "
+ echo " $test_time | "
+ echo "
"
+ done < "$temp_file"
+
+ # Close the last table
+ if [ -n "$current_file" ]; then
+ echo " "
+ echo "
"
+ fi
+
+ echo ""
+ echo ""
+ } > "$output_file"
+
+ # Clean up temporary file
+ rm -f "$temp_file"
+}
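+# Illustrative invocation (comment only): triggered when BASHUNIT_REPORT_HTML
+# is set (see `-r|--report-html` below):
+#
+#   reports::generate_report_html "report.html"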
+
+# runner.sh
+# shellcheck disable=SC2155
+
+function runner::load_test_files() {
+ local filter=$1
+ shift
+ local files=("${@}")
+
+ for test_file in "${files[@]}"; do
+ if [[ ! -f $test_file ]]; then
+ continue
+ fi
+ # shellcheck source=/dev/null
+ source "$test_file"
+ runner::run_set_up_before_script
+ if parallel::is_enabled; then
+ runner::call_test_functions "$test_file" "$filter" 2>/dev/null &
+ else
+ runner::call_test_functions "$test_file" "$filter"
+ fi
+ runner::run_tear_down_after_script
+ runner::clean_set_up_and_tear_down_after_script
+ done
+
+ if parallel::is_enabled; then
+ wait
+ runner::spinner &
+ local spinner_pid=$!
+ parallel::aggregate_test_results "$TEMP_DIR_PARALLEL_TEST_SUITE"
+ # Kill the spinner once the aggregation finishes
+ disown "$spinner_pid" && kill "$spinner_pid" &>/dev/null
+ printf "\r " # Clear the spinner output
+ fi
+}
+
+function runner::spinner() {
+ if env::is_simple_output_enabled; then
+ printf "\n"
+ fi
+
+ local delay=0.1
+ local spin_chars="|/-\\"
+ while true; do
+ for ((i=0; i<${#spin_chars}; i++)); do
+ printf "\r%s" "${spin_chars:$i:1}"
+ sleep "$delay"
+ done
+ done
+}
+
+function runner::functions_for_script() {
+ local script="$1"
+ local all_fn_names="$2"
+
+ # Filter the names down to the ones defined in the script, sort them by line number
+ shopt -s extdebug
+ # shellcheck disable=SC2086
+ declare -F $all_fn_names |
+ awk -v s="$script" '$3 == s {print $1" " $2}' |
+ sort -k2 -n |
+ awk '{print $1}'
+ shopt -u extdebug
+}
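+# With extdebug enabled, `declare -F <name>` prints "<name> <line> <file>",
+# which is what the awk/sort pipeline above relies on. Illustrative output
+# (comment only; the function name and path are hypothetical):
+#
+#   $ declare -F test_phpcs_is_working
+#   test_phpcs_is_working 17 tests/EndToEnd/phpcs_test.sh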
+
+function runner::call_test_functions() {
+ local script="$1"
+ local filter="$2"
+ local prefix="test"
+ # Use declare -F to list all function names
+ local all_fn_names=$(declare -F | awk '{print $3}')
+ local filtered_functions=$(helper::get_functions_to_run "$prefix" "$filter" "$all_fn_names")
+ # shellcheck disable=SC2207
+ local functions_to_run=($(runner::functions_for_script "$script" "$filtered_functions"))
+
+ if [[ "${#functions_to_run[@]}" -le 0 ]]; then
+ return
+ fi
+
+ runner::render_running_file_header
+ helper::check_duplicate_functions "$script" || true
+
+ for fn_name in "${functions_to_run[@]}"; do
+ if parallel::is_enabled && parallel::must_stop_on_failure; then
+ break
+ fi
+
+ local provider_data=()
+ while IFS=" " read -r line; do
+ provider_data+=("$line")
+ done <<< "$(helper::get_provider_data "$fn_name" "$script")"
+
+ # No data provider found
+ if [[ "${#provider_data[@]}" -eq 0 ]]; then
+ runner::run_test "$script" "$fn_name"
+ unset fn_name
+ continue
+ fi
+
+ # Execute the test function for each line of data
+ for data in "${provider_data[@]}"; do
+ IFS=" " read -r -a args <<< "$data"
+ if [ "${#args[@]}" -gt 1 ]; then
+ runner::run_test "$script" "$fn_name" "${args[@]}"
+ else
+ runner::run_test "$script" "$fn_name" "$data"
+ fi
+ done
+ unset fn_name
+ done
+
+ if ! env::is_simple_output_enabled; then
+ echo ""
+ fi
+}
+
+function runner::render_running_file_header() {
+ if parallel::is_enabled; then
+ return
+ fi
+
+ if ! env::is_simple_output_enabled; then
+ if env::is_verbose_enabled; then
+ printf "\n${_COLOR_BOLD}%s${_COLOR_DEFAULT}\n" "Running $script"
+ else
+ printf "${_COLOR_BOLD}%s${_COLOR_DEFAULT}\n" "Running $script"
+ fi
+ elif env::is_verbose_enabled; then
+ printf "\n\n${_COLOR_BOLD}%s${_COLOR_DEFAULT}" "Running $script"
+ fi
+}
+
+function runner::run_test() {
+ local start_time
+ start_time=$(clock::now)
+
+ local test_file="$1"
+ shift
+ local fn_name="$1"
+ shift
+
+ # Export a unique test identifier so that test doubles can
+ # create temporary files scoped per test run. This prevents
+ # race conditions when running tests in parallel.
+ local sanitized_fn_name
+ sanitized_fn_name="$(helper::normalize_variable_name "$fn_name")"
+ if env::is_parallel_run_enabled; then
+ export BASHUNIT_CURRENT_TEST_ID="${sanitized_fn_name}_$$_$(random_str 6)"
+ else
+ export BASHUNIT_CURRENT_TEST_ID="${sanitized_fn_name}_$$"
+ fi
+
+ local interpolated_fn_name="$(helper::interpolate_function_name "$fn_name" "$@")"
+ local current_assertions_failed="$(state::get_assertions_failed)"
+ local current_assertions_snapshot="$(state::get_assertions_snapshot)"
+ local current_assertions_incomplete="$(state::get_assertions_incomplete)"
+ local current_assertions_skipped="$(state::get_assertions_skipped)"
+
+ # (FD = File Descriptor)
+ # Duplicate the current std-output (FD 1) and assigns it to FD 3.
+ # This means that FD 3 now points to wherever the std-output was pointing.
+ exec 3>&1
+
+ local test_execution_result=$(
+ trap '
+ state::set_test_exit_code $?
+ runner::run_tear_down
+ runner::clear_mocks
+ state::export_subshell_context
+ ' EXIT
+ state::initialize_assertions_count
+ runner::run_set_up
+
+    # 2>&1: Redirects the std-error (FD 2) to the std-output (FD 1),
+    # which at this point still points to the original std-output.
+ "$fn_name" "$@" 2>&1
+
+ )
+
+ # Closes FD 3, which was used temporarily to hold the original stdout.
+ exec 3>&-
+
+ local end_time=$(clock::now)
+ local duration_ns=$(math::calculate "($end_time - $start_time) ")
+ local duration=$(math::calculate "$duration_ns / 1000000")
+
+ if env::is_verbose_enabled; then
+ if env::is_simple_output_enabled; then
+ echo ""
+ fi
+
+ printf '%*s\n' "$TERMINAL_WIDTH" '' | tr ' ' '='
+ printf "%s\n" "File: $test_file"
+ printf "%s\n" "Function: $fn_name"
+ printf "%s\n" "Duration: $duration ms"
+    local raw_text=${test_execution_result%%##ASSERTIONS_*}
+    [[ -n $raw_text ]] && printf "%s" "Raw text: $raw_text"
+ printf "%s\n" "##ASSERTIONS_${test_execution_result#*##ASSERTIONS_}"
+ printf '%*s\n' "$TERMINAL_WIDTH" '' | tr ' ' '-'
+ fi
+
+ local subshell_output=$(runner::decode_subshell_output "$test_execution_result")
+
+ if [[ -n "$subshell_output" ]]; then
+ # Formatted as "[type]line" @see `state::print_line()`
+ local type="${subshell_output%%]*}" # Remove everything after "]"
+ type="${type#[}" # Remove the leading "["
+ local line="${subshell_output#*]}" # Remove everything before and including "]"
+
+ # Replace [type] with a newline to split the messages
+ line=$(echo "$line" | sed -e 's/\[failed\]/\n/g' \
+ -e 's/\[skipped\]/\n/g' \
+ -e 's/\[incomplete\]/\n/g')
+
+ state::print_line "$type" "$line"
+
+ subshell_output=$line
+ fi
+
+ local runtime_output="${test_execution_result%%##ASSERTIONS_*}"
+
+ local runtime_error=""
+ for error in "command not found" "unbound variable" "permission denied" \
+ "no such file or directory" "syntax error" "bad substitution" \
+ "division by 0" "cannot allocate memory" "bad file descriptor" \
+ "segmentation fault" "illegal option" "argument list too long" \
+ "readonly variable" "missing keyword" "killed" \
+ "cannot execute binary file" "invalid arithmetic operator"; do
+ if [[ "$runtime_output" == *"$error"* ]]; then
+ runtime_error=$(echo "${runtime_output#*: }" | tr -d '\n')
+ break
+ fi
+ done
+
+ runner::parse_result "$fn_name" "$test_execution_result" "$@"
+
+ local total_assertions="$(state::calculate_total_assertions "$test_execution_result")"
+ local test_exit_code="$(state::get_test_exit_code)"
+
+ if [[ -n $runtime_error || $test_exit_code -ne 0 ]]; then
+ state::add_tests_failed
+ console_results::print_error_test "$fn_name" "$runtime_error"
+ reports::add_test_failed "$test_file" "$fn_name" "$duration" "$total_assertions"
+ runner::write_failure_result_output "$test_file" "$runtime_error"
+ return
+ fi
+
+ if [[ "$current_assertions_failed" != "$(state::get_assertions_failed)" ]]; then
+ state::add_tests_failed
+ reports::add_test_failed "$test_file" "$fn_name" "$duration" "$total_assertions"
+ runner::write_failure_result_output "$test_file" "$subshell_output"
+
+ if env::is_stop_on_failure_enabled; then
+ if parallel::is_enabled; then
+ parallel::mark_stop_on_failure
+ else
+ exit "$EXIT_CODE_STOP_ON_FAILURE"
+ fi
+ fi
+ return
+ fi
+
+ if [[ "$current_assertions_snapshot" != "$(state::get_assertions_snapshot)" ]]; then
+ state::add_tests_snapshot
+ console_results::print_snapshot_test "$fn_name"
+ reports::add_test_snapshot "$test_file" "$fn_name" "$duration" "$total_assertions"
+ return
+ fi
+
+ if [[ "$current_assertions_incomplete" != "$(state::get_assertions_incomplete)" ]]; then
+ state::add_tests_incomplete
+ reports::add_test_incomplete "$test_file" "$fn_name" "$duration" "$total_assertions"
+ return
+ fi
+
+ if [[ "$current_assertions_skipped" != "$(state::get_assertions_skipped)" ]]; then
+ state::add_tests_skipped
+ reports::add_test_skipped "$test_file" "$fn_name" "$duration" "$total_assertions"
+ return
+ fi
+
+ local label="$(helper::normalize_test_function_name "$fn_name" "$interpolated_fn_name")"
+
+ if [[ "$fn_name" == "$interpolated_fn_name" ]]; then
+ console_results::print_successful_test "${label}" "$duration" "$@"
+ else
+ console_results::print_successful_test "${label}" "$duration"
+ fi
+ state::add_tests_passed
+ reports::add_test_passed "$test_file" "$fn_name" "$duration" "$total_assertions"
+}
+
+function runner::decode_subshell_output() {
+ local test_execution_result="$1"
+
+ local test_output_base64="${test_execution_result##*##TEST_OUTPUT=}"
+ test_output_base64="${test_output_base64%%##*}"
+
+  if command -v base64 >/dev/null; then
+    echo "$test_output_base64" | base64 -d
+  else
+    echo "$test_output_base64" | openssl enc -d -base64
+  fi
+}
+
+function runner::parse_result() {
+ local fn_name=$1
+ shift
+ local execution_result=$1
+ shift
+ local args=("$@")
+
+ if parallel::is_enabled; then
+ runner::parse_result_parallel "$fn_name" "$execution_result" "${args[@]}"
+ else
+ runner::parse_result_sync "$fn_name" "$execution_result"
+ fi
+}
+
+function runner::parse_result_parallel() {
+ local fn_name=$1
+ shift
+ local execution_result=$1
+ shift
+ local args=("$@")
+
+ local test_suite_dir="${TEMP_DIR_PARALLEL_TEST_SUITE}/$(basename "$test_file" .sh)"
+ mkdir -p "$test_suite_dir"
+
+ local sanitized_args
+ sanitized_args=$(echo "${args[*]}" | tr '[:upper:]' '[:lower:]' | sed -E 's/[^a-z0-9]+/-/g; s/^-|-$//')
+ local template
+ if [[ -z "$sanitized_args" ]]; then
+ template="${fn_name}.XXXXXX.result"
+ else
+ template="${fn_name}-${sanitized_args}.XXXXXX.result"
+ fi
+
+ local unique_test_result_file
+ unique_test_result_file=$(mktemp -p "$test_suite_dir" "$template")
+
+ log "debug" "[PARA]" "fn_name:$fn_name" "execution_result:$execution_result"
+
+ runner::parse_result_sync "$fn_name" "$execution_result"
+
+ echo "$execution_result" > "$unique_test_result_file"
+}
+
+# shellcheck disable=SC2295
+function runner::parse_result_sync() {
+ local fn_name=$1
+ local execution_result=$2
+
+ local result_line
+ result_line=$(echo "$execution_result" | tail -n 1)
+
+ local assertions_failed=0
+ local assertions_passed=0
+ local assertions_skipped=0
+ local assertions_incomplete=0
+ local assertions_snapshot=0
+ local test_exit_code=0
+
+ local regex
+ regex='ASSERTIONS_FAILED=([0-9]*)##'
+ regex+='ASSERTIONS_PASSED=([0-9]*)##'
+ regex+='ASSERTIONS_SKIPPED=([0-9]*)##'
+ regex+='ASSERTIONS_INCOMPLETE=([0-9]*)##'
+ regex+='ASSERTIONS_SNAPSHOT=([0-9]*)##'
+ regex+='TEST_EXIT_CODE=([0-9]*)'
+
+ if [[ $result_line =~ $regex ]]; then
+ assertions_failed="${BASH_REMATCH[1]}"
+ assertions_passed="${BASH_REMATCH[2]}"
+ assertions_skipped="${BASH_REMATCH[3]}"
+ assertions_incomplete="${BASH_REMATCH[4]}"
+ assertions_snapshot="${BASH_REMATCH[5]}"
+ test_exit_code="${BASH_REMATCH[6]}"
+ fi
+
+ log "debug" "[SYNC]" "fn_name:$fn_name" "execution_result:$execution_result"
+
+ ((_ASSERTIONS_PASSED += assertions_passed)) || true
+ ((_ASSERTIONS_FAILED += assertions_failed)) || true
+ ((_ASSERTIONS_SKIPPED += assertions_skipped)) || true
+ ((_ASSERTIONS_INCOMPLETE += assertions_incomplete)) || true
+ ((_ASSERTIONS_SNAPSHOT += assertions_snapshot)) || true
+ ((_TEST_EXIT_CODE += test_exit_code)) || true
+}
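+# Illustrative result line as matched by the regex above (comment only; the
+# counts are made up):
+#
+#   ##ASSERTIONS_FAILED=0##ASSERTIONS_PASSED=3##ASSERTIONS_SKIPPED=1##ASSERTIONS_INCOMPLETE=0##ASSERTIONS_SNAPSHOT=0##TEST_EXIT_CODE=0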
+
+function runner::write_failure_result_output() {
+ local test_file=$1
+ local error_msg=$2
+
+ local test_nr="*"
+ if ! parallel::is_enabled; then
+ test_nr=$(state::get_tests_failed)
+ fi
+
+ echo -e "$test_nr) $test_file\n$error_msg" >> "$FAILURES_OUTPUT_PATH"
+}
+
+function runner::run_set_up() {
+ helper::execute_function_if_exists 'set_up'
+}
+
+function runner::run_set_up_before_script() {
+ helper::execute_function_if_exists 'set_up_before_script'
+}
+
+function runner::run_tear_down() {
+ helper::execute_function_if_exists 'tear_down'
+}
+
+function runner::clear_mocks() {
+ for i in "${!MOCKED_FUNCTIONS[@]}"; do
+ unmock "${MOCKED_FUNCTIONS[$i]}"
+ done
+}
+
+function runner::run_tear_down_after_script() {
+ helper::execute_function_if_exists 'tear_down_after_script'
+}
+
+function runner::clean_set_up_and_tear_down_after_script() {
+ helper::unset_if_exists 'set_up'
+ helper::unset_if_exists 'tear_down'
+ helper::unset_if_exists 'set_up_before_script'
+ helper::unset_if_exists 'tear_down_after_script'
+}
+
+# bashunit.sh
+
+# This file provides a facade for developers who want
+# to interact with the internals of bashunit,
+# e.g. adding custom assertions.
+
+function bashunit::assertion_failed() {
+ local expected=$1
+ local actual=$2
+ local failure_condition_message=${3:-"but got "}
+
+ local label
+ label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"
+ state::add_assertions_failed
+ console_results::print_failed_test "${label}" "${expected}" \
+ "$failure_condition_message" "${actual}"
+}
+
+function bashunit::assertion_passed() {
+ state::add_assertions_passed
+}
+
+# main.sh
+
+function main::exec_tests() {
+ local filter=$1
+ local files=("${@:2}")
+
+ local test_files=()
+ while IFS= read -r line; do
+ test_files+=("$line")
+ done < <(helper::load_test_files "$filter" "${files[@]}")
+
+ if [[ ${#test_files[@]} -eq 0 || -z "${test_files[0]}" ]]; then
+ printf "%sError: At least one file path is required.%s\n" "${_COLOR_FAILED}" "${_COLOR_DEFAULT}"
+ console_header::print_help
+ exit 1
+ fi
+
+ # Trap SIGINT (Ctrl-C) and call the cleanup function
+ trap 'main::cleanup' SIGINT
+ trap '[[ $? -eq $EXIT_CODE_STOP_ON_FAILURE ]] && main::handle_stop_on_failure_sync' EXIT
+
+ if env::is_parallel_run_enabled && ! parallel::is_enabled; then
+ printf "%sWarning: Parallel tests are supported on macOS, Ubuntu and Windows.\n" "${_COLOR_INCOMPLETE}"
+ printf "For other OS (like Alpine), --parallel is not enabled due to inconsistent results,\n"
+ printf "particularly involving race conditions.%s " "${_COLOR_DEFAULT}"
+ printf "%sFallback using --no-parallel%s\n" "${_COLOR_SKIPPED}" "${_COLOR_DEFAULT}"
+ fi
+
+ if parallel::is_enabled; then
+ parallel::reset
+ fi
+
+ console_header::print_version_with_env "$filter" "${test_files[@]}"
+
+ if env::is_verbose_enabled; then
+ if env::is_simple_output_enabled; then
+ echo ""
+ fi
+ printf '%*s\n' "$TERMINAL_WIDTH" '' | tr ' ' '#'
+ printf "%s\n" "Filter: ${filter:-None}"
+ printf "%s\n" "Total files: ${#test_files[@]}"
+ printf "%s\n" "Test files:"
+ printf -- "- %s\n" "${test_files[@]}"
+ printf '%*s\n' "$TERMINAL_WIDTH" '' | tr ' ' '.'
+ env::print_verbose
+ printf '%*s\n' "$TERMINAL_WIDTH" '' | tr ' ' '#'
+ fi
+
+ runner::load_test_files "$filter" "${test_files[@]}"
+
+ if parallel::is_enabled; then
+ wait
+ fi
+
+ if parallel::is_enabled && parallel::must_stop_on_failure; then
+ printf "\r%sStop on failure enabled...%s\n" "${_COLOR_SKIPPED}" "${_COLOR_DEFAULT}"
+ fi
+
+ console_results::print_failing_tests_and_reset
+ console_results::render_result
+ exit_code=$?
+
+ if [[ -n "$BASHUNIT_LOG_JUNIT" ]]; then
+ reports::generate_junit_xml "$BASHUNIT_LOG_JUNIT"
+ fi
+
+ if [[ -n "$BASHUNIT_REPORT_HTML" ]]; then
+ reports::generate_report_html "$BASHUNIT_REPORT_HTML"
+ fi
+
+ cleanup_temp_files
+ exit $exit_code
+}
+
+function main::cleanup() {
+ printf "%sCaught Ctrl-C, killing all child processes...%s\n" "${_COLOR_SKIPPED}" "${_COLOR_DEFAULT}"
+ # Kill all child processes of this script
+ pkill -P $$
+ cleanup_temp_files
+ exit 1
+}
+
+function main::handle_stop_on_failure_sync() {
+ printf "\n%sStop on failure enabled...%s\n" "${_COLOR_SKIPPED}" "${_COLOR_DEFAULT}"
+ console_results::print_failing_tests_and_reset
+ console_results::render_result
+ cleanup_temp_files
+ exit 1
+}
+
+function main::exec_assert() {
+ local original_assert_fn=$1
+ local args=("${@:2}")
+
+ local assert_fn=$original_assert_fn
+
+ # Check if the function exists
+ if ! type "$assert_fn" > /dev/null 2>&1; then
+ assert_fn="assert_$assert_fn"
+ if ! type "$assert_fn" > /dev/null 2>&1; then
+ echo "Function $original_assert_fn does not exist." 1>&2
+ exit 127
+ fi
+ fi
+
+ # Get the last argument safely by calculating the array length
+ local last_index=$((${#args[@]} - 1))
+ local last_arg="${args[$last_index]}"
+ local output=""
+ local inner_exit_code=0
+ local bashunit_exit_code=0
+
+ # Handle different assert_* functions
+ case "$assert_fn" in
+ assert_exit_code)
+ output=$(main::handle_assert_exit_code "$last_arg")
+ inner_exit_code=$?
+ # Remove the last argument and append the exit code
+ args=("${args[@]:0:last_index}")
+ args+=("$inner_exit_code")
+ ;;
+ *)
+ # Add more cases here for other assert_* handlers if needed
+ ;;
+ esac
+
+ if [[ -n "$output" ]]; then
+ echo "$output" 1>&1
+ assert_fn="assert_same"
+ fi
+
+ # Run the assertion function and write into stderr
+ "$assert_fn" "${args[@]}" 1>&2
+ bashunit_exit_code=$?
+
+ if [[ "$(state::get_tests_failed)" -gt 0 ]] || [[ "$(state::get_assertions_failed)" -gt 0 ]]; then
+ return 1
+ fi
+
+ return "$bashunit_exit_code"
+}
+
+function main::handle_assert_exit_code() {
+ local cmd="$1"
+ local output
+ local inner_exit_code=0
+
+ if [[ $(command -v "${cmd%% *}") ]]; then
+ output=$(eval "$cmd" 2>&1 || echo "inner_exit_code:$?")
+ local last_line
+ last_line=$(echo "$output" | tail -n 1)
+ if echo "$last_line" | grep -q 'inner_exit_code:[0-9]*'; then
+ inner_exit_code=$(echo "$last_line" | grep -o 'inner_exit_code:[0-9]*' | cut -d':' -f2)
+ if ! [[ $inner_exit_code =~ ^[0-9]+$ ]]; then
+ inner_exit_code=1
+ fi
+ output=$(echo "$output" | sed '$d')
+ fi
+ echo "$output"
+ return "$inner_exit_code"
+ else
+ echo "Command not found: $cmd" 1>&2
+ return 127
+ fi
+}
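+# Illustrative standalone usage (comments only): `-a|--assert` runs a single
+# assertion without a test file; the `assert_` prefix may be omitted, since
+# main::exec_assert falls back to it:
+#
+#   ./scripts/bashunit -a equals "foo" "foo"
+#   ./scripts/bashunit -a contains "Runner" "$(ls src)"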
+
+#!/usr/bin/env bash
+set -euo pipefail
+
+# shellcheck disable=SC2034
+declare -r BASHUNIT_VERSION="0.20.0"
+
+# shellcheck disable=SC2155
+declare -r BASHUNIT_ROOT_DIR="$(dirname "${BASH_SOURCE[0]}")"
+export BASHUNIT_ROOT_DIR
+
+
+_ASSERT_FN=""
+_FILTER=""
+_ARGS=()
+
+check_os::init
+clock::init
+
+while [[ $# -gt 0 ]]; do
+ argument="$1"
+ case $argument in
+ -a|--assert)
+ _ASSERT_FN="$2"
+ shift
+ ;;
+ -f|--filter)
+ _FILTER="$2"
+ shift
+ ;;
+ -s|--simple)
+ export BASHUNIT_SIMPLE_OUTPUT=true
+ ;;
+ --detailed)
+ export BASHUNIT_SIMPLE_OUTPUT=false
+ ;;
+ --debug)
+      OUTPUT_FILE="${2:-}"
+      if [[ -n $OUTPUT_FILE ]]; then
+        exec > "$OUTPUT_FILE" 2>&1
+        shift
+      fi
+      set -x
+ ;;
+ -S|--stop-on-failure)
+ export BASHUNIT_STOP_ON_FAILURE=true
+ ;;
+ -p|--parallel)
+ export BASHUNIT_PARALLEL_RUN=true
+ ;;
+ --no-parallel)
+ export BASHUNIT_PARALLEL_RUN=false
+ ;;
+ -e|--env|--boot)
+ # shellcheck disable=SC1090
+ source "$2"
+ shift
+ ;;
+ -l|--log-junit)
+ export BASHUNIT_LOG_JUNIT="$2";
+ shift
+ ;;
+ -r|--report-html)
+ export BASHUNIT_REPORT_HTML="$2";
+ shift
+ ;;
+ -vvv|--verbose)
+ export BASHUNIT_VERBOSE=true
+ ;;
+ -v|--version)
+ console_header::print_version
+ trap '' EXIT && exit 0
+ ;;
+ --upgrade)
+ upgrade::upgrade
+ trap '' EXIT && exit 0
+ ;;
+ -h|--help)
+ console_header::print_help
+ trap '' EXIT && exit 0
+ ;;
+ *)
+ while IFS='' read -r line; do
+ _ARGS+=("$line");
+ done < <(helper::find_files_recursive "$argument")
+ ;;
+ esac
+ shift
+done
+
+# shellcheck disable=SC1090
+[[ -f "$BASHUNIT_BOOTSTRAP" ]] && source "$BASHUNIT_BOOTSTRAP"
+
+set +eu
+
+if [[ -n "$_ASSERT_FN" ]]; then
+ main::exec_assert "$_ASSERT_FN" "${_ARGS[@]}"
+else
+ main::exec_tests "$_FILTER" "${_ARGS[@]}"
+fi
diff --git a/src/Runner.php b/src/Runner.php
index d527ea575e..37c3c65c7a 100644
--- a/src/Runner.php
+++ b/src/Runner.php
@@ -764,7 +764,7 @@ public function processFile($file)
* The reporting information returned by each child process is merged
* into the main reporter class.
*
- * @param array $childProcs An array of child processes to wait for.
+ * @param array $childProcs An array of child processes to wait for.
*
* @return bool
*/
@@ -777,7 +777,8 @@ private function processChildProcs($childProcs)
while (count($childProcs) > 0) {
$pid = pcntl_waitpid(0, $status);
- if ($pid <= 0) {
+ if ($pid <= 0 || isset($childProcs[$pid]) === false) {
+ // No child or a child with an unmanaged PID was returned.
continue;
}
diff --git a/tests/EndToEnd/patches/Runner.php_inject_style_error.patch b/tests/EndToEnd/patches/Runner.php_inject_style_error.patch
new file mode 100644
index 0000000000..603c7e7825
--- /dev/null
+++ b/tests/EndToEnd/patches/Runner.php_inject_style_error.patch
@@ -0,0 +1,11 @@
+--- a/src/Runner.php
++++ b/src/Runner.php
+@@ -73,7 +73,7 @@ class Runner
+ $this->config = new Config();
+
+ // Init the run and load the rulesets to set additional config vars.
+- $this->init();
++ $this->init(); // This comment is not allowed here
+
+ // Print a list of sniffs in each of the supplied standards.
+ // We fudge the config here so that each standard is explained in isolation.
diff --git a/tests/EndToEnd/phpcbf_test.sh b/tests/EndToEnd/phpcbf_test.sh
new file mode 100644
index 0000000000..b4fd5cc69b
--- /dev/null
+++ b/tests/EndToEnd/phpcbf_test.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+
+function set_up() {
+ # Pick a subset of files to speed up the test suite.
+ FILES="src/Ruleset.php src/Runner.php"
+ TMPDIR="tmp-phpcbf"
+
+ mkdir "$(pwd)/$TMPDIR"
+}
+
+function tear_down() {
+ rm -r "$(pwd)/$TMPDIR"
+}
+
+function test_phpcbf_is_working() {
+  # Run phpcbf against the subset of files selected in set_up.
+ OUTPUT="$(bin/phpcbf --no-cache ${FILES})"
+
+ assert_successful_code
+ assert_contains "No violations were found" "$OUTPUT"
+}
+
+function test_phpcbf_is_working_in_parallel() {
+ # We pick two files to speed up the test suite
+ OUTPUT="$(bin/phpcbf --no-cache --parallel=2 ${FILES})"
+
+ assert_successful_code
+ assert_contains "No violations were found" "$OUTPUT"
+}
+
+function test_phpcbf_returns_error_on_issues() {
+ # Copy & patch Runner.php with a style error so we can verify the error path
+ TMPFILE="$TMPDIR/Runner.php"
+ cp src/Runner.php "$(pwd)/$TMPFILE"
+ patch "$TMPFILE" tests/EndToEnd/patches/Runner.php_inject_style_error.patch
+
+ OUTPUT="$(bin/phpcbf --no-colors --no-cache "$TMPFILE")"
+ assert_exit_code 1
+
+ assert_contains "F 1 / 1 (100%)" "$OUTPUT"
+ assert_contains "A TOTAL OF 1 ERROR WERE FIXED IN 1 FILE" "$OUTPUT"
+}
+
+function test_phpcbf_bug_1112() {
+ # See https://github.com/PHPCSStandards/PHP_CodeSniffer/issues/1112
+ if [[ "$(uname)" == "Darwin" ]]; then
+ # Perform some magic with `& fg` to prevent the processes from turning into a background job.
+ assert_successful_code "$(bash -ic 'bash --init-file <(echo "echo \"Subprocess\"") -c "bin/phpcbf --no-cache --parallel=2 src/Ruleset.php src/Runner.php" & fg')"
+ else
+ # This is not needed on Linux / GitHub Actions
+ assert_successful_code "$(bash -ic 'bash --init-file <(echo "echo \"Subprocess\"") -c "bin/phpcbf --no-cache --parallel=2 src/Ruleset.php src/Runner.php"')"
+ fi
+}
diff --git a/tests/EndToEnd/phpcs_test.sh b/tests/EndToEnd/phpcs_test.sh
new file mode 100644
index 0000000000..b3a23f4af6
--- /dev/null
+++ b/tests/EndToEnd/phpcs_test.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+
+function set_up() {
+ # Pick a subset of files to speed up the test suite.
+ FILES="src/Ruleset.php src/Runner.php"
+ TMPDIR="tmp-phpcs"
+
+ mkdir "$(pwd)/$TMPDIR"
+}
+
+function tear_down() {
+ rm -r "$(pwd)/$TMPDIR"
+}
+
+function test_phpcs_is_working() {
+ assert_successful_code "$(bin/phpcs --no-cache ${FILES})"
+}
+
+function test_phpcs_is_working_in_parallel() {
+ assert_successful_code "$(bin/phpcs --no-cache --parallel=2 ${FILES})"
+}
+
+function test_phpcs_returns_error_on_issues() {
+ # Copy & patch Runner.php with a style error so we can verify the error path
+ TMPFILE="$TMPDIR/Runner.php"
+ cp src/Runner.php "$(pwd)/$TMPFILE"
+ patch "$TMPFILE" tests/EndToEnd/patches/Runner.php_inject_style_error.patch
+
+ OUTPUT="$(bin/phpcs --no-colors --no-cache "$TMPFILE")"
+ assert_exit_code 2
+
+ assert_contains "E 1 / 1 (100%)" "$OUTPUT"
+ assert_contains "FOUND 2 ERRORS AFFECTING 1 LINE" "$OUTPUT"
+}
+
+function test_phpcs_bug_1112() {
+ # See https://github.com/PHPCSStandards/PHP_CodeSniffer/issues/1112
+ if [[ "$(uname)" == "Darwin" ]]; then
+ # Perform some magic with `& fg` to prevent the processes from turning into a background job.
+ assert_successful_code "$(bash -ic 'bash --init-file <(echo "echo \"Subprocess\"") -c "bin/phpcs --no-cache --parallel=2 src/Ruleset.php src/Runner.php" & fg')"
+ else
+ # This is not needed on Linux / GitHub Actions
+ assert_successful_code "$(bash -ic 'bash --init-file <(echo "echo \"Subprocess\"") -c "bin/phpcs --no-cache --parallel=2 src/Ruleset.php src/Runner.php"')"
+ fi
+}