10 | 10 |
11 | 11 | # Output: |
12 | 12 | # Writes the test results to a results.json file in the passed-in output directory. |
13 | | -# The test results are formatted according to the specifications at https://github.com/exercism/docs/blob/main/building/tooling/test-runners/interface.md |
| 13 | +# The test results are formatted according to the specifications at |
| 14 | +# https://github.com/exercism/docs/blob/main/building/tooling/test-runners/interface.md |
14 | 15 |
15 | 16 | # Example: |
16 | 17 | # ./bin/run.sh two-fer /absolute/path/to/two-fer/solution/folder/ /absolute/path/to/output/directory/ |
17 | 18 |
18 | | -# If any required arguments is missing, print the usage and exit |
19 | | -if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then |
| 19 | +set -o pipefail |
| 20 | +set -u |
| 21 | + |
| 22 | +# If the wrong number of arguments is given, print the usage and exit |
| 23 | +if [ $# -ne 3 ]; then |
20 | 24 | echo "usage: ./bin/run.sh exercise-slug /absolute/path/to/two-fer/solution/folder/ /absolute/path/to/output/directory/" |
21 | 25 | exit 1 |
22 | 26 | fi |
23 | 27 |
24 | | -slug="$1" |
25 | | -input_dir="${2%/}" |
26 | | -output_dir="${3%/}" |
27 | | -root_dir=$(realpath $(dirname "$0")/..) |
| 28 | +# Establish the base directory so we can build fully-qualified paths. |
| 29 | +base_dir=$(builtin cd "${BASH_SOURCE%/*}/.." || exit; pwd) |
| 30 | + |
| 31 | +slug=${1} |
| 32 | +input_dir=${2} |
| 33 | +output_dir=${3} |
28 | 34 | results_file="${output_dir}/results.json" |
29 | 35 |
| 36 | +# Under Docker the build directory is mounted as a read-write tmpfs so that: |
| 37 | +# - We can work with a writable file system |
| 38 | +# - We avoid copying files between the Docker host and client, giving a nice speedup. |
| 39 | +build_dir=/tmp/build |
| 40 | +cache_dir=${build_dir}/cache |
| 41 | + |
| 42 | +if [ ! -d "${input_dir}" ]; then |
| 43 | + echo "No such directory: ${input_dir}" |
| 44 | + exit 1 |
| 45 | +fi |
| 46 | + |
30 | 47 | # Create the output directory if it doesn't exist |
31 | 48 | mkdir -p "${output_dir}" |
32 | 49 |
33 | | -echo "${slug}: testing..." |
| 50 | +# Prepare build directory |
| 51 | +if [ -d "${build_dir}" ]; then |
| 52 | + rm -rf "${build_dir}" |
| 53 | +fi |
34 | 54 |
35 | | -pushd "${input_dir}" > /dev/null |
| 55 | +mkdir -p "${build_dir}" |
| 56 | +pushd "${build_dir}" > /dev/null || exit |
36 | 57 |
37 | | -ln -s "${root_dir}/node_modules" |
38 | | -ln -s "${root_dir}/bower_components" |
39 | | -cp -r "${root_dir}/output" . # We can't symlink this as pulp needs to write to it |
| 58 | +# Put the basic spago project in place |
| 59 | +cp "${input_dir}"/*.dhall . |
| 60 | +ln -s "${input_dir}"/src . |
| 61 | +ln -s "${input_dir}"/test . |
| 62 | + |
| 63 | +# Set up the cache directory. We require a writable dhall cache because |
| 64 | +# dhall will attempt to fetch the upstream package-set definition. |
| 65 | +mkdir "${cache_dir}" |
| 66 | +cp -R "${HOME}"/.cache/dhall "${cache_dir}" |
| 67 | +cp -R "${HOME}"/.cache/dhall-haskell "${cache_dir}" |
| 68 | + |
| 69 | +# Link in our prepared node_modules. |
| 70 | +ln -s "${base_dir}/pre-compiled/node_modules" . |
| 71 | + |
| 72 | +# The timestamps of the `output/` directory must be preserved, or else the |
| 73 | +# PureScript compiler (`purs`) will invalidate the cache and force a rebuild, |
| 74 | +# defeating pre-compilation altogether (hence the use of the `cp` `-p` |
| 75 | +# flag). |
| 76 | +cp -R -p "${base_dir}/pre-compiled/output" . |
| 77 | +cp -R "${base_dir}/pre-compiled/.spago" . |
| 78 | + |
| 79 | +echo "Building and testing ${slug} in ${build_dir}..." |
40 | 80 |
41 | 81 | # Run the tests for the provided implementation file and redirect stdout and |
42 | | -# stderr to capture it |
43 | | -test_output=$(pulp test 2>&1) |
| 82 | +# stderr to capture it. We do our best to minimize the output, emitting only |
| 83 | +# compiler errors or unit test output, as this is scrubbed and presented to |
| 84 | +# the student. In addition, spago will try to write to ~/.cache/spago and |
| 85 | +# will fail on a read-only mount, so we skip the global cache and request |
| 86 | +# that no packages be installed. |
| 87 | +export XDG_CACHE_HOME=${cache_dir} |
| 88 | +spago_output=$(npx spago --global-cache skip --no-psa test --no-install 2>&1) |
44 | 89 | exit_code=$? |
45 | 90 |
46 | | -popd > /dev/null |
| 91 | +popd > /dev/null || exit |
47 | 92 |
48 | | -# Write the results.json file based on the exit code of the command that was |
49 | | -# just executed that tested the implementation file |
| 93 | +# Write the results.json file based on the exit code of the command that was |
| 94 | +# just executed that tested the implementation file. |
50 | 95 | if [ $exit_code -eq 0 ]; then |
51 | | - jq -n '{version: 1, status: "pass"}' > ${results_file} |
| 96 | + jq -n '{version: 1, status: "pass"}' > "${results_file}" |
52 | 97 | else |
53 | | - # Sanitize the output |
54 | | - sanitized_test_output=$(echo "${test_output}" | sed -E \ |
55 | | - -e '/^\* Building project/d' \ |
| 98 | + sanitized_spago_output=$(echo "${spago_output}" | sed -E \ |
56 | 99 | -e '/^Compiling/d' \ |
57 | | - -e '/at .*(node:internal|.*\/opt\/test-runner\/.*\.js)/d') |
58 | | - |
59 | | - # Manually add colors to the output to help scanning the output for errors |
60 | | - colorized_test_output=$(echo "${sanitized_test_output}" | \ |
61 | | - GREP_COLOR='01;31' grep --color=always -E -e '(Error found:|Error:|\* ERROR:|.*Failed:).*$|$' | \ |
62 | | - GREP_COLOR='01;32' grep --color=always -E -e '.*Passed:.*$|$') |
63 | | - |
64 | | - printf "${colorized_test_output}" |
| 100 | + -e '/at.*:[[:digit:]]+:[[:digit:]]+\)?/d') |
65 | 101 |
66 | | - jq -n --arg output "${colorized_test_output}" '{version: 1, status: "fail", message: $output}' > ${results_file} |
| 102 | + jq --null-input --arg output "${sanitized_spago_output}" '{version: 1, status: "fail", message: $output}' > "${results_file}" |
67 | 103 | fi |
68 | 104 |
69 | | -echo "${slug}: done" |
| 105 | +echo "Done" |
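Note: per the interface spec linked at the top of the script, a version 1 result is just a status plus an optional message, and this script only distinguishes pass and fail. A minimal sketch of what the two jq calls above produce, using placeholder paths for a local run:

    # Hypothetical invocation; the paths are placeholders, not from this commit.
    ./bin/run.sh two-fer /solutions/two-fer/ /tmp/out/
    cat /tmp/out/results.json
    # pass: {"version":1,"status":"pass"}
    # fail: {"version":1,"status":"fail","message":"<sanitized spago output>"}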
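The read-write tmpfs mentioned in the build-directory comment is set up on the Docker side, not in this script. A sketch of a compatible invocation, where the image name and exact flags are assumptions for illustration only:

    # A read-only root filesystem plus a writable tmpfs at /tmp matches
    # the constraints this script works around.
    docker run --rm \
      --read-only \
      --mount type=tmpfs,destination=/tmp \
      purescript-test-runner \
      two-fer /solution /output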
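Since the pre-compilation scheme hinges on `cp -p` keeping timestamps intact, a quick sanity check placed just after the copy can confirm the mtimes match. This uses GNU stat, and the module path is a made-up example:

    # Compare modification times of one pre-compiled artifact and its copy.
    stat -c '%y %n' \
      "${base_dir}/pre-compiled/output/Test.Main/index.js" \
      "${build_dir}/output/Test.Main/index.js"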
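The sed filter near the end is easiest to verify in isolation. The sample lines below are invented, but they exercise both expressions: compiler progress lines and JavaScript stack frames of the form "at fn (file.js:line:col)":

    # Feed three representative lines through the same filter.
    printf '%s\n' \
      'Compiling Test.Main' \
      'Error found:' \
      '  at Object.runTest (/opt/test-runner/runner.js:10:15)' |
      sed -E -e '/^Compiling/d' -e '/at.*:[[:digit:]]+:[[:digit:]]+\)?/d'
    # Only "Error found:" survives.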