#!/bin/bash
#==============================================================================
# Copyright and license info is available in the LICENSE file included with
# the Server Deployment Package (SDP), and also available online:
# https://swarm.workshop.perforce.com/projects/perforce-software-sdp/view/main/LICENSE
#------------------------------------------------------------------------------

set -u

#==============================================================================
# Declarations and Environment

declare ThisScript="${0##*/}"
declare Version=2.9.0
declare ThisUser=
declare ThisHost=${HOSTNAME%%.*}
declare CmdLine="${0} $*"
declare -i Debug=${SDP_DEBUG:-0}
declare -i ErrorCount=0
declare -i SilentMode=0
declare -i TestCount=0
declare -i TestID=0
declare -i TestPassCount=0
declare -i TestFailCount=0
declare -i TestSkipCount=0
declare -i AbortOnTestFailure=0
declare -i NoOp=0
declare H1="=============================================================================="
declare H2="------------------------------------------------------------------------------"
declare H3="||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||"
declare Log=
declare LogLink="/tmp/${ThisScript%.sh}.log"
declare P4TestCmd=
declare P4C=
declare P4=p4
declare ExpectedExit=
declare TestLog=
declare -i RequireValidTests=1
declare -i ExpectedExitCode=0
declare -i ActualExitCode=0
declare ExpectedString=
declare -i ExpectedStringInOutput=0
declare Comments=
declare CLITestDataOK=1
declare LoadSDPEnv=1
declare CfgDir="/p4/sdp/test/bsw"
declare CLITestDataFile=
declare CfgFile=
declare UserTestGroups=
declare -i InUserTestGroup=0
declare TestGroup=

# Values loaded from general config file.
declare RunHost=
declare TestTag=

declare OutputFile=/tmp/sdp.test_output.$$.$RANDOM
declare -i TestPassed=0
declare -i Line=0
declare GARBAGE=

GARBAGE+=" $OutputFile"

#==============================================================================
# Local Functions

function msg () { echo -e "$*"; }
function dbg () { [[ "$Debug" -eq 0 ]] || msg "DEBUG: $*"; }
function errmsg () { msg "\\nError: ${1:-Unknown Error}\\n"; ErrorCount+=1; }
function bail () { errmsg "${1:-Unknown Error}"; exit "$ErrorCount"; }
function pass () { TestCount+=1; TestPassCount+=1; msg "PASS Test $TestCount"; }

function fail () {
   TestCount+=1
   TestFailCount+=1
   msg "FAIL Test $TestCount"
   if [[ "$AbortOnTestFailure" -eq 1 ]]; then
      msg "Aborting after first test failure due to '-e'."
      exit 1
   fi
}

#------------------------------------------------------------------------------
# Function: terminate
function terminate {
   local garbageFile=

   # Disable signal trapping.
   trap - EXIT SIGINT SIGTERM

   dbg "$ThisScript: EXITCODE: $ErrorCount"

   if [[ -n "$GARBAGE" ]]; then
      for garbageFile in $GARBAGE; do
         rm -f "$garbageFile"
      done
   fi

   # Stop logging.
   [[ "$Log" == off ]] || msg "\\nLog is: $Log\\n${H1}\\n"

   # With the trap removed, exit.
   exit "$((ErrorCount+TestFailCount+TestSkipCount))"
}
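# Note: terminate() runs via the EXIT trap set in the Main Program section
# below, so the script's final exit status is the sum of setup errors, failed
# tests, and skipped tests; a zero exit status indicates a fully clean run.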
#------------------------------------------------------------------------------
# Function: usage (required function)
#
# Input:
# $1 - style, either -h (for short form) or -man (for man-page like format).
# $2 - optional usage error message to display before the usage text.
#------------------------------------------------------------------------------
function usage {
   declare style=${1:--h}
   declare errorMessage=${2:-}

   [[ -n "$errorMessage" ]] && msg "\\nUsage Error: $errorMessage\\n"

   msg "USAGE for $ThisScript v$Version:

$ThisScript [-cd <cfg_dir>] [-no_env] [-f] [-e] [-g <test_group_1>[,<test_group_2>,...]] [-L <log>] [-si] [-n] [-d|-D]

or

$ThisScript [-h|-man]
"
   if [[ $style == -man ]]; then
      msg "
DESCRIPTION:
   This script runs tests for the Server Deployment Package (SDP).

   Tests are defined in the file:
   $CLITestDataFile

OPTIONS:
 -cd <cfg_dir>
   Specify '-cd <cfg_dir>' to specify an alternate location for the
   configuration directory.  The specified directory must contain the
   ${CLITestDataFile##*/} file.  See FILES for more information.

   The default is: $CfgDir

 -no_env
   Specify '-no_env' to avoid loading the SDP Shell Environment (p4_vars).
   This option is implied if /p4/common/bin/p4_vars does not exist.

 -f Use '-f' to bypass any invalid test entries in the command line test data.

 -e Use '-e' to abort immediately after the first test failure.

   By default, this script attempts to execute all tests in the test suite
   (in hopes of illuminating the most issues).  The '-e' option changes the
   behavior to stop execution after a test failure, so as to preserve the
   state of the system.  This may be useful for interactive debugging of
   scripted tests.

 -g <test_group_1>[,<test_group_2>,...]
   Specify '-g <test_group>' to specify a comma-delimited list of test
   groups to run.  If not specified, the default is to execute all test
   groups.

 -L <log>
   Specify the path to a log file, or the special value 'off' to disable
   logging.  By default, all output (stdout and stderr) goes to:
   /tmp/${ThisScript%.sh}.<timestamp>.log

   NOTE: This script is self-logging.  That is, output displayed on the
   screen is simultaneously captured in the log file.  It is not necessary
   (nor harmful) to use redirection operators like '> log' or '2>&1' or
   'tee'.

   This script uses a fixed LogLink of /tmp/${ThisScript%.sh}.log which is
   updated to point to the newly created log for each run, so that this
   link reliably points to the most recent execution of the script.

 -si
   Operate silently.  All output (stdout and stderr) is redirected to the
   log only; no output appears on the terminal.  This cannot be used with
   '-L off'.

HELP OPTIONS:
 -h   Display short help message
 -man Display man-style help message

DEBUGGING OPTIONS:
 -n Show what tests would be run, without executing the test commands
   (No-Op mode).

 -d Enable debug messages.

 -D Use bash 'set -x' extreme debugging verbosity.  Implies '-d'.

LOGGING:
   This script is self-logging.  That is, output displayed on the screen is
   simultaneously captured in the log file.  It is not necessary (nor
   harmful) to use redirection operators like '> log' or '2>&1' or 'tee'.

   This script uses a fixed log symlink /tmp/${ThisScript%.sh}.log which is
   updated to point to the newly created log for each run, so that this
   symlink reliably points to the most recent execution of the script
   (unless '-L off' was used).

FILES:
   TEST CONFIG DIR
   Files related to testing live in a configuration directory.  When used
   for testing in the Battle School Lab Environment, the directory is
   $CfgDir on the bos-helix-01 machine.  This can be changed with the
   '-cd <cfg_dir>' option.

   HOST SPECIFIC TEST ENVIRONMENT FILE
   If a file named test_sdp.<hostname>.cfg is found for the current host,
   this file is sourced.
   It is expected to look something like this:

   declare RunHost=bos-helix-01
   declare TestTag=\"test_sdp.\$RunHost\"
   declare TestWS=\"bruno_jam.\$TestTag\"
   declare TestWSRoot=\"/p4/1/tmp/\$TestWS\"
   export SDP_TEST_HOME=/p4/sdp/test/bsw

   The file is sourced to load any shell environment settings needed by
   command line tests to be executed, such as PATH adjustments.  It must set
   values for RunHost and TestTag; other settings are optional.

   If no such host config file exists, it is equivalent to a file existing
   with these contents:

   declare RunHost=\${HOSTNAME%%.*}
   declare TestTag=\"test.\$RunHost\"
   declare TestWS=\"bruno_jam.\$TestTag\"
   declare TestWSRoot=\"/p4/1/tmp/\$TestWS\"
   export SDP_TEST_HOME=/p4/sdp/test/bsw

   COMMAND LINE TEST CONFIG FILE
   The cli_tests.cfg file defines the command line tests to be executed,
   including expected exit codes and output for each.  Open this file to get
   documentation of the expected format.

   ABOUT THE TEST DIRECTORY
   The /p4/sdp/test directory will not appear on a customer-deployed SDP in
   the real world.  The /p4/sdp directory will exist, but the 'test'
   directory and 'bsw' subdirectory are populated only when using the
   //sdp/dev_insitu stream in a Battle School Lab Environment.

EXAMPLES:
   For typical usage, no arguments are needed.  Run this in a Battle School
   Lab Environment as perforce@bos-helix-01, after first having run 'lab qa'
   as student@bsw-lab-ui to prepare the environment.

   \$ cd /p4/sdp/test/bsw
   \$ ./$ThisScript
"
   fi

   exit 1
}

#==============================================================================
# Command Line Processing

declare -i shiftArgs=0
set +u
while [[ $# -gt 0 ]]; do
   case $1 in
      (-cd) CfgDir="$2"; shiftArgs=1;;
      (-no_env) LoadSDPEnv=0;;
      (-f) RequireValidTests=0;;
      (-g) UserTestGroups="$2"; shiftArgs=1;;
      (-e) AbortOnTestFailure=1;;
      (-h) usage -h;;
      (-man) usage -man;;
      (-L) Log="$2"; shiftArgs=1;;
      (-si) SilentMode=1;;
      (-n) NoOp=1;;
      (-d) Debug=1;;
      (-D) Debug=1; set -x;; # Debug; use 'set -x' mode.
      (-*) usage -h "Unknown option ($1).";;
      (*) usage -h "Unknown parameter ($1).";;
   esac

   # Shift (modify $#) the appropriate number of times.
   shift
   while [[ $shiftArgs -gt 0 ]]; do
      [[ $# -eq 0 ]] && usage -h "Bad usage."
      shiftArgs=$shiftArgs-1
      shift
   done
done
set -u

#==============================================================================
# Command Line Verification

[[ -n "$Log" ]] || Log="/tmp/${ThisScript%.sh}.$(date +'%Y-%m-%d-%H%M%S').log"

[[ "$SilentMode" -eq 1 && "$Log" == off ]] && \
   usage -h "Cannot use '-si' with '-L off'."

#==============================================================================
# Main Program

trap terminate EXIT SIGINT SIGTERM

if [[ "$Log" != off ]]; then
   touch "$Log" || bail "Couldn't touch log file [$Log]."

   # Redirect stdout and stderr to a log file.
   if [[ $SilentMode -eq 0 ]]; then
      exec > >(tee "$Log")
      exec 2>&1
   else
      exec >"$Log"
      exec 2>&1
   fi

   # Update the $LogLink symlink so it points to the current log.
   rm -f "$LogLink"
   ln -s "$Log" "$LogLink"

   msg "${H1}\\nLog is: $Log\\n"
fi

ThisUser=$(id -n -u)

msg "Started $ThisScript v$Version as $ThisUser@$ThisHost on $(date) as pid $$:\\nInitial Command Line:\\n$CmdLine\\n"

CfgFile="$CfgDir/test_sdp.$ThisHost.cfg"
CLITestDataFile="$CfgDir/cli_tests.cfg"

if [[ -r "$CfgFile" ]]; then
   # shellcheck disable=SC1090
   source "$CfgFile" || bail "Failed to load config file [$CfgFile]."
else
   msg "No host config file found [$CfgFile]. Generating default host config file."
   # shellcheck disable=SC2016
   {
      echo 'declare RunHost=${HOSTNAME%%.*}'
      echo 'declare TestTag="test.$RunHost"'
      echo 'declare TestWS="bruno_jam.$TestTag"'
      echo 'declare TestWSRoot="/p4/1/tmp/$TestWS"'
      echo 'export SDP_TEST_HOME=/p4/sdp/test/bsw'
   } > "$CfgFile" || bail "Could not generate host config file [$CfgFile]."
   # shellcheck disable=SC1090
   source "$CfgFile" || bail "Failed to load config file [$CfgFile]."
fi

# Sanity check on values loaded from the config file.
[[ -z "$TestTag" ]] && bail "Environment loaded from $CfgFile is missing variable definition for TestTag."

if [[ $ThisHost == "$RunHost" ]]; then
   msg "Verified: Running on $RunHost."
else
   bail "Not configured to run on host $ThisHost. Run only on $RunHost, as configured in $CfgFile."
fi

msg "Loading and verifying command line test data from: $CLITestDataFile"

[[ -r "$CLITestDataFile" ]] || bail "Missing command line test data file [$CLITestDataFile]."

# See the $CLITestDataFile file for details on the expected file format.
# Short version: We're expecting one-line entries like this:
# <TestGroup>|<Cmd>|<ExpectedExit>|<TestLog>|<ExpectedStringRegex>|<Comments>
# The <Cmd> field is typically a 'p4' command starting with 'p4 ',
# 'p4@<P4PORT>@<P4USER>@<P4CLIENT> ', or 'p4:<user>:<client> ' (so that
# connection arguments can be injected below), but it may be an arbitrary
# command line, or the special value 'no_cmd' to check a log written by an
# earlier test without running a new command.
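# The entries below are illustrative only (hypothetical commands, log paths,
# expected strings, and group names, not taken from cli_tests.cfg), showing
# the general shape of entries in the documented format:
#
#   smoke|p4 info|0|output|Server version|Confirm the server answers 'p4 info'.
#   smoke|p4@1666@bruno@jam_ws have|U|output|depot|List synced files as bruno in jam_ws; exit code ignored.
#   verify|no_cmd|U|/p4/1/logs/p4verify.log|completed|Re-scan a log written by an earlier test.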
Line=0; while read -r entry; do
   Line=$((Line+1))

   # shellcheck disable=SC2116
   [[ -z "$(echo "$entry")" ]] && continue
   [[ $entry == "#"* ]] && continue

   # TestGroup is the first field, delimited by '|'.
   TestGroup=${entry%%|*}

   # If the user specified a comma-delimited list of test groups with '-g',
   # run only tests in the specified groups.
   if [[ -n "$UserTestGroups" ]]; then
      InUserTestGroup=0
      for g in ${UserTestGroups//,/ }; do
         if [[ "${TestGroup^^}" == "${g^^}" ]]; then
            InUserTestGroup=1
         fi
      done

      if [[ "$InUserTestGroup" -eq 1 ]]; then
         dbg "Test Group [$TestGroup] is in user-specified test group list [$UserTestGroups]. Processing it."
      else
         dbg "Test Group [$TestGroup] is NOT in user-specified test group list [$UserTestGroups]. Skipping it."
         # Skip the rest of this loop iteration if the group isn't in the
         # user-specified test group list.
         continue
      fi
   fi

   # P4TestCmd is the second field, delimited by '|'.
   P4TestCmd=${entry#*|}
   P4TestCmd=${P4TestCmd%%|*}

   # ExpectedExit is the third field, delimited by '|'.
   ExpectedExit=${entry#*|}
   ExpectedExit=${ExpectedExit#*|}
   ExpectedExit=${ExpectedExit%%|*}

   # TestLog is the fourth field, delimited by '|'.
   TestLog=${entry#*|}
   TestLog=${TestLog#*|}
   TestLog=${TestLog#*|}
   TestLog=${TestLog%%|*}

   # ExpectedString is the second from the last field; strip from the right.
   ExpectedString=${entry%|*}
   ExpectedString=${ExpectedString##*|}

   # Comments is the last field. Strip everything from the left.
   Comments=${entry##*|}

   P4C="p4 -u bruno -c jam_ws -p 1666"

   if [[ $P4TestCmd == "p4 "* ]]; then
      P4TestCmd=${P4TestCmd/p4 /$P4C }
   elif [[ $P4TestCmd == "p4@"* ]]; then
      # Parse entries like: "p4@P4PORT@P4USER@P4CLIENT command args ..."
      Port=${P4TestCmd#p4@}
      Port=${Port%%@*}
      User=${P4TestCmd#p4@}
      User=${User#*@}
      User=${User%%@*}
      Workspace=${P4TestCmd#p4@}
      Workspace=${Workspace#*@}
      Workspace=${Workspace#*@}
      Workspace=${Workspace%% *}
      P4TestCmd=${P4TestCmd/p4@$Port@$User@$Workspace /$P4 -p $Port -u $User -c $Workspace }
   elif [[ $P4TestCmd == "p4:"* ]]; then
      # Legacy support for the "p4:user:client" format; this may be dropped
      # later in favor of the above format, "p4@P4PORT@P4USER@P4CLIENT".
      # Parse entries like: "p4:user:client command args ..."
      User=${P4TestCmd#p4:}
      User=${User%%:*}
      Workspace=${P4TestCmd#p4:}
      Workspace=${Workspace#*:}
      Workspace=${Workspace%% *}
      Port=1666
      P4TestCmd=${P4TestCmd/p4:$User:$Workspace /$P4 -p $Port -u $User -c $Workspace }
   fi
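   # For illustration (hypothetical entry values): a command field of
   #   'p4@1666@bruno@jam_ws sync -n'
   # parses out Port=1666, User=bruno, and Workspace=jam_ws above, and is
   # rewritten as:
   #   'p4 -p 1666 -u bruno -c jam_ws sync -n'
   # while a plain 'p4 ...' command simply has the $P4C connection arguments
   # substituted for the leading 'p4 '.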
   if [[ $ExpectedExit == U ]]; then
      ExpectedExit=Undefined
   elif [[ $ExpectedExit == N ]]; then
      ExpectedExit=NonZero
   else
      if [[ $ExpectedExit =~ ^[0-9]+$ ]]; then
         ExpectedExitCode="$ExpectedExit"
      else
         errmsg "Entry on line $Line of $CLITestDataFile has a bogus exit code. Must be numeric, 'U', or 'N'; value is $ExpectedExit. Skipping this test."
         CLITestDataOK=0
         continue
      fi
   fi

   if [[ "$P4TestCmd" == no_cmd && "$TestLog" == output ]]; then
      errmsg "Invalid use of both 'no_cmd' and 'output' on line $Line of $CLITestDataFile. If no command is run, there can be no output!"
      CLITestDataOK=0
      continue
   fi

   msg "TG: [$TestGroup] C:[$P4TestCmd] E:[$ExpectedExit] L:[$TestLog] S:[$ExpectedString] Comments: $Comments"

done < "$CLITestDataFile"

if [[ "$CLITestDataOK" -eq 1 ]]; then
   msg "Verified: All test entries are OK."
else
   if [[ "$RequireValidTests" -eq 1 ]]; then
      bail "The command line test data file [$CLITestDataFile] contained invalid entries. Use '-f' to bypass bad tests and continue."
   else
      msg "\\nSome tests were skipped due to invalid entries in $CLITestDataFile. Continuing due to '-f'.\\n"
   fi
fi

msg "$H1\\nPre-start test preparations."
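# Note: the '1' argument passed to p4_vars below selects the SDP instance
# whose environment is loaded. This test suite appears to assume SDP instance
# 1 throughout (e.g., the /p4/1/... paths used in the default host config).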
if [[ "$LoadSDPEnv" -eq 1 && -r /p4/common/bin/p4_vars ]]; then
   msg "Loading SDP Environment file."
   # shellcheck disable=SC1091
   source /p4/common/bin/p4_vars 1 ||\
      bail "Failed to load SDP environment file p4_vars."
else
   if [[ "$LoadSDPEnv" -eq 0 ]]; then
      dbg "Skipping load of SDP Environment due to '-no_env'."
   else
      dbg "Skipping load of SDP Environment due to missing /p4/common/bin/p4_vars."
   fi
fi

msg "$H3\\nExecuting Command Line Tests."

# It might seem inelegant to simply read the config file in again, as opposed
# to storing and re-using the results from the data verification run above.
# But with 'bash', storing results tends to have bad but subtle side effects,
# like having quote characters disappear.

Line=0; while read -r entry; do
   Line=$((Line+1))

   # shellcheck disable=SC2116
   [[ -z "$(echo "$entry")" ]] && continue
   [[ $entry == "#"* ]] && continue

   TestPassed=1

   # TestGroup is the first field, delimited by '|'.
   TestGroup=${entry%%|*}

   # If the user specified a comma-delimited list of test groups with '-g',
   # run only tests in the specified groups.
   if [[ -n "$UserTestGroups" ]]; then
      InUserTestGroup=0
      for g in ${UserTestGroups//,/ }; do
         if [[ "${TestGroup^^}" == "${g^^}" ]]; then
            InUserTestGroup=1
         fi
      done

      if [[ "$InUserTestGroup" -eq 1 ]]; then
         dbg "Test Group [$TestGroup] is in user-specified test group list [$UserTestGroups]. Processing it."
      else
         dbg "Test Group [$TestGroup] is NOT in user-specified test group list [$UserTestGroups]. Skipping it."
         # Skip the rest of this loop iteration if the group isn't in the
         # user-specified test group list.
         continue
      fi
   fi

   # P4TestCmd is the second field, delimited by '|'.
   P4TestCmd=${entry#*|}
   P4TestCmd=${P4TestCmd%%|*}

   # ExpectedExit is the third field, delimited by '|'.
   ExpectedExit=${entry#*|}
   ExpectedExit=${ExpectedExit#*|}
   ExpectedExit=${ExpectedExit%%|*}

   # TestLog is the fourth field, delimited by '|'.
   TestLog=${entry#*|}
   TestLog=${TestLog#*|}
   TestLog=${TestLog#*|}
   TestLog=${TestLog%%|*}

   # ExpectedString is the second from the last field; strip from the right.
   ExpectedString=${entry%|*}
   ExpectedString=${ExpectedString##*|}

   # Comments is the last field. Strip everything from the left.
   Comments=${entry##*|}

   if [[ "$TestLog" == "output" ]]; then
      ExpectedStringInOutput=1
   else
      ExpectedStringInOutput=0
   fi

   if [[ $P4TestCmd == "p4 "* ]]; then
      P4TestCmd=${P4TestCmd/p4 /$P4C }
   elif [[ $P4TestCmd == "p4@"* ]]; then
      # Parse entries like: "p4@P4PORT@P4USER@P4CLIENT command args ..."
      Port=${P4TestCmd#p4@}
      Port=${Port%%@*}
      User=${P4TestCmd#p4@}
      User=${User#*@}
      User=${User%%@*}
      Workspace=${P4TestCmd#p4@}
      Workspace=${Workspace#*@}
      Workspace=${Workspace#*@}
      Workspace=${Workspace%% *}
      P4TestCmd=${P4TestCmd/p4@$Port@$User@$Workspace /$P4 -p $Port -u $User -c $Workspace }
   elif [[ $P4TestCmd == "p4:"* ]]; then
      # Parse entries like: "p4:user:client command args ..."
      User=${P4TestCmd#p4:}
      User=${User%%:*}
      Workspace=${P4TestCmd#p4:}
      Workspace=${Workspace#*:}
      Workspace=${Workspace%% *}
      Port=1666
      P4TestCmd=${P4TestCmd/p4:$User:$Workspace /$P4 -p $Port -u $User -c $Workspace }
   fi

   if [[ $ExpectedExit == U ]]; then
      ExpectedExit=Undefined
   elif [[ $ExpectedExit == N ]]; then
      ExpectedExit=NonZero
   else
      if [[ $ExpectedExit =~ ^[0-9]+$ ]]; then
         ExpectedExitCode="$ExpectedExit"
      else
         TestSkipCount+=1
         continue
      fi
   fi

   TestID=$((TestCount+1))

   msg "$H2\\nTest $TestID\\nTesting Command: $P4TestCmd\\nExpected Exit: $ExpectedExit\\nTest Log: $TestLog\\n"

   if [[ "$ExpectedStringInOutput" -eq 1 ]]; then
      msg "Expected string in output: $ExpectedString\\nComments: $Comments\\n"
   else
      msg "Expected string in log [$TestLog]: $ExpectedString\\nComments: $Comments\\n"
   fi
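   # Two behaviors worth noting here: the test command is run via 'eval', so
   # quoting, pipes, and redirection in the command field behave as they
   # would on an interactive command line; and the expected string is treated
   # as an extended regular expression ('grep -E') matched against either the
   # command output or the named test log.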
   if [[ "$NoOp" -eq 0 ]]; then
      if [[ "$P4TestCmd" != no_cmd ]]; then
         dbg "Running: eval \"$P4TestCmd\" > \"$OutputFile\" 2>&1"
         eval "$P4TestCmd" > "$OutputFile" 2>&1
         ActualExitCode=$?
      else
         dbg "Not executing a command due to 'no_cmd'."
         # If not running a command, there will be no actual exit code, so just
         # set it to the expected value.
         ActualExitCode="$ExpectedExitCode"
      fi
   else
      # NoOp mode. Try to spoof a successful test.
      msg "NO_OP: Would have run: $P4TestCmd > $OutputFile 2>&1"
      ActualExitCode="$ExpectedExitCode"
      echo "$ExpectedString" > "$OutputFile"
   fi

   # Show the command output unless 'no_cmd' was specified.
   if [[ "$P4TestCmd" != no_cmd ]]; then
      msg "\\n== Command Output =="
      cat "$OutputFile"

      msg "TEST_EXIT_CODE: Actual $ActualExitCode, Expected $ExpectedExit"

      if [[ "$ExpectedExit" == Undefined ]]; then
         msg "Ignoring TEST_EXIT_CODE due to 'U' value."
      elif [[ "$ExpectedExit" == NonZero ]]; then
         [[ "$ActualExitCode" -gt 0 ]] || TestPassed=0
      else
         [[ "$ExpectedExitCode" -ne "$ActualExitCode" ]] && TestPassed=0
      fi
   else
      msg "Checking log from prior test due to 'no_cmd'."
   fi

   if [[ -n "$ExpectedString" ]]; then
      # Check for the expected string in the command output or the specified
      # test log file.
      if [[ "$ExpectedStringInOutput" -eq 1 ]]; then
         if grep -E -q "$ExpectedString" "$OutputFile"; then
            msg "\\nExpected string [$ExpectedString] found in command output."
         else
            msg "\\nExpected string [$ExpectedString] NOT found in command output."
            TestPassed=0
         fi
      else
         if [[ -r "$TestLog" ]]; then
            if grep -q -E "$ExpectedString" "$TestLog"; then
               msg "\\nExpected string [$ExpectedString] found in log [$TestLog]."
            else
               msg "\\nExpected string [$ExpectedString] NOT found in log [$TestLog]."
               TestPassed=0
            fi
         else
            msg "String [$ExpectedString] expected in log [$TestLog] but log is missing."
            TestPassed=0
         fi
      fi
   else
      msg "No expected string defined, skipping check for expected string for this test."
   fi

   if [[ "$TestPassed" -eq 1 ]]; then
      pass
   else
      fail
   fi

done < "$CLITestDataFile"

if [[ "$TestPassCount" -ge 1 && "$TestFailCount" -eq 0 && "$ErrorCount" -eq 0 ]]; then
   msg "${H1}\\nALL $TestCount tests PASSED.\\n"
else
   if [[ "$ErrorCount" -eq 0 ]]; then
      msg "There were no errors in test setup. Test results should be valid."
   else
      errmsg "There were $ErrorCount errors in test setup. Test results may not be valid."
   fi
   msg "${H1}\\nTest Run completed.  Summary:\\nTests Executed (Passed/Failed/Skipped): $TestCount ($TestPassCount/$TestFailCount/$TestSkipCount)\\n\\nScan above output carefully.\\n"
fi

# Illustrate using $SECONDS to display the runtime of a script.
msg "That took about $((SECONDS/3600)) hours $((SECONDS%3600/60)) minutes $((SECONDS%60)) seconds.\\n"

# See the terminate() function, which is really where this script exits.
exit "$((ErrorCount+TestFailCount+TestSkipCount))"
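# Illustrative invocations (the group names below are hypothetical; actual
# group names come from the first field of entries in cli_tests.cfg):
#
#   ./run_cli_tests.sh                  # Run all configured tests.
#   ./run_cli_tests.sh -g smoke,verify  # Run only the 'smoke' and 'verify' test groups.
#   ./run_cli_tests.sh -e -d            # Stop at the first failure, with debug messages.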