diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index 43ac8ea6b..2731d0fcd 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -33,7 +33,11 @@ jobs:
       - name: Make HTML Docs
         run: |
           pip install -e .[memprof,mpi,test,docs]
-          pytest --junit-xml=test_results.xml -n 4 armi
+          python -c "from armi.bookkeeping.report.reportingUtils import getSystemInfo;print(getSystemInfo())" > system_info.log
+          date > python_details.log
+          python --version >> python_details.log
+          pip freeze >> python_details.log
+          pytest --junit-xml=test_results.xml -v -n 4 armi > pytest_verbose.log
           cd doc
           git submodule init
           git submodule update
diff --git a/doc/Makefile b/doc/Makefile
index 0a688681d..298ea9e21 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -2,7 +2,7 @@
 #
 # You can set these variables from the command line.
-SPHINXOPTS    = -v
+SPHINXOPTS    =
 SPHINXBUILD   = sphinx-build
 SOURCEDIR     = .
 BUILDDIR      = _build
 
diff --git a/doc/conf.py b/doc/conf.py
index 7bfb8d865..aa8745d2c 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -40,8 +40,6 @@
 from docutils.parsers.rst import Directive, directives
 from sphinx.domains.python import PythonDomain
 
-# from sphinx_needs.api import add_dynamic_function  # TODO: JOHN
-
 # handle python import locations for this execution
 PYTHONPATH = os.path.abspath("..")
 sys.path.insert(0, PYTHONPATH)
@@ -67,66 +65,6 @@
 _TUTORIAL_FILES = [
     fName for fName in bookkeepingTests.TUTORIAL_FILES if "ipynb" not in fName
 ]
-TEST_RESULTS = []
-
-
-def getTestResult(app, need, needs):
-    """Dynamic function used by sphinx-needs to gather the result of a test tag."""
-    if not need["signature"]:
-        return "none"
-
-    # Get all the tests that match the method signature
-    results = [
-        test_case["result"]
-        for test_case in TEST_RESULTS
-        if need["signature"] == test_case["method"]
-    ]
-    # Logic is as follows if there are multiple matches:
-    # - If one is a "failure", then return "failure"
-    # - If all are "skipped", then return "skipped"
-    # - Otherwise, return "passed"
-    if results:
-        if "failure" in results:
-            return "failure"
-        elif "passed" in results:
-            return "passed"
-        else:
-            return "skipped"
-
-    # Things get a little more complicated when the test tag has a class-level signature.
-    # Basically we have to determine if all the methods in the class passed or if any of skipped/failed.
-    # First, gather all the results related to the class signature from the tag and categorize by method
-    results = {}
-    for test_case in TEST_RESULTS:
-        if need["signature"] == test_case["class"]:
-            if test_case["method"] in results:
-                results[test_case["method"]].append(test_case["result"])
-            else:
-                results[test_case["method"]] = [test_case["result"]]
-
-    # If we haven't found the test by now, we never will
-    if not results:
-        return "none"
-
-    # Apply logic from before for each method in the class
-    for m, r in results.items():
-        if "failure" in r:
-            results[m] = "failure"
-        elif "passed" in r:
-            results[m] = "passed"
-        else:
-            results[m] = "skipped"
-
-    # Now for the class logic:
-    # - If any of the methods failed, return "failure"
-    # - If any of the methods skipped, return "skipped"
-    # - If all of the methods passed, return "passed"
-    if "failure" in results.values():
-        return "failure"
-    elif "skipped" in results.values():
-        return "skipped"
-    else:
-        return "passed"
 
 
 class PatchedPythonDomain(PythonDomain):
@@ -287,31 +225,12 @@ def autodoc_skip_member_handler(app, what, name, obj, skip, options):
     return name.startswith("_") or name in excludes
 
 
-def getTestAcceptanceCriteria(app, need, needs):
-    # Return title if there is not just one requirement or the linked requirement doesn't exist
-    if len(need.get("tests", [])) != 1 or need["tests"][0] not in needs:
-        ac = need["title"].strip()
-
-    req = needs[need["tests"][0]]
-    # Return title if there is not just one test in the requirement
-    if len(req.get("tests_back", [])) != 1:
-        ac = need["title"].strip()
-    else:
-        ac = req["acceptance_criteria"].strip()
-
-    # For some reason sphinx is adding another period at the end of this, so we'll just remove it
-    return ac[:-1] if ac[-1] == "." else ac
-
-
 def setup(app):
     """Method to make `make html` generate api documentation."""
     app.connect("autodoc-skip-member", autodoc_skip_member_handler)
     app.add_domain(PatchedPythonDomain, override=True)
     app.add_directive("exec", ExecDirective)
     app.add_directive("pyreverse", PyReverse)
-    # add_dynamic_function(app, getTestAcceptanceCriteria, "get_test_acceptance_criteria")  # TODO: JOHN
-    # add_dynamic_function(app, getTestResult, "get_test_result")
-    # add_dynamic_function(app, getTestAcceptanceCriteria, "get_test_acceptance_criteria")
 
     # making tutorial data dir
     dataDir = pathlib.Path("user") / ".." / "anl-afci-177"
@@ -645,20 +564,6 @@ def setup(app):
     },
 }
 
-# TODO: JOHN
-"""
-needs_global_options = {
-    # Defaults for test tags
-    "acceptance_criteria": (
-        ":ndf:`get_test_acceptance_criteria()`.",
-        "type=='test'",
-    ),
-    "template": ("test", "type=='test'"),
-    "layout": ("test_layout", "type=='test'"),
-    "result": ("[[get_test_result()]]", "type=='test'"),
-}
-"""
-
 # Formats need roles (reference to a req in text) as just the req ID
 needs_role_need_template = "{id}"
diff --git a/doc/qa_docs/index.rst b/doc/qa_docs/index.rst
index 10e0b7270..9688e2dd7 100644
--- a/doc/qa_docs/index.rst
+++ b/doc/qa_docs/index.rst
@@ -2,9 +2,9 @@
 QA Documentation
 ################
 
-This is the documentation for the Advanced Reactor Modeling Interface (ARMI) framework. This
-document includes the Software Requirements Specification Document (SRSD), the Software Design and
-Implementation Document (SDID), and the Software Test Report (STR).
+This is the Quality Assurance (QA) documentation for the Advanced Reactor Modeling Interface (ARMI)
+framework. This document includes the Software Requirements Specification Document (SRSD), the
+Software Design and Implementation Document (SDID), and the Software Test Report (STR).
 
 -------------
 
@@ -14,4 +14,4 @@ Implementation Document (SDID), and the Software Test Report (STR).
 
    srsd
    sdid
-   str
\ No newline at end of file
+   str
diff --git a/doc/qa_docs/str.rst b/doc/qa_docs/str.rst
index b22ebc229..ae259321d 100644
--- a/doc/qa_docs/str.rst
+++ b/doc/qa_docs/str.rst
@@ -1,8 +1,246 @@
 Software Test Report (STR)
 ==========================
 
-TBD
+Purpose and Scope
+-----------------
+
+This document is the software test report (STR) for the ARMI framework.
+
+.. _ref_armi_default_test_criteria:
+
+Default Test Criteria
+---------------------
+
+The acceptance tests for ARMI requirements are very uniform. They are all unit tests that were
+designed to be quite linear and straightforward. Unless a test states otherwise, all of the
+following test criteria apply to each ARMI requirement test. Any deviation from these standard
+conditions will be documented in :numref:`Section %s <ref_armi_test_trace_matrix>` on a
+test-by-test basis.
+
+Testing Approach
+^^^^^^^^^^^^^^^^
+
+This section defines the test attributes that all tests here have in common.
+
+Planned Test Cases, Sequence, and Identification of Stages Required
+"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+The test cases are described in the test traceability matrix (TTM) in
+:numref:`Section %s <ref_armi_test_trace_matrix>`. All tests must be run, and they may be run in
+any order unless otherwise specified for a given test in
+:numref:`Section %s <ref_armi_test_trace_matrix>`.
+
+Requirements for Testing Logic Branches
+"""""""""""""""""""""""""""""""""""""""
+
+Tests are written such that each test has only one primary logic path. For tests that do not
+conform to only one logic path, more information will be provided in the test traceability section
+of the STR (:numref:`Section %s <ref_armi_test_trace_matrix>`), defining the logic flow in more
+detail.
+
+.. _ref_armi_hardware_integration:
+
+Requirements for Hardware Integration
+"""""""""""""""""""""""""""""""""""""
+
+The ``ARMI`` software tests will be run on modern versions of Linux, Windows, and macOS. Though
+for documentation brevity, only the verbose Linux logs are attached to this document.
+
+Criteria for Accepting the Software
+"""""""""""""""""""""""""""""""""""
+
+The acceptance testing must pass with satisfactory results for all tests associated with
+requirements in the :ref:`Software Requirements Specification Document (SRSD) <ref_armi_srsd>`
+for the ``ARMI`` software.
+
+.. _ref_armi_input_data_requirements:
+
+Necessary Inputs to Run Test Cases
+""""""""""""""""""""""""""""""""""
+
+If inputs are necessary to run test cases or to return the system and data back to their original
+state, the processes will be documented in the test traceability matrix (TTM) in
+:numref:`Section %s <ref_armi_test_trace_matrix>` (the TTM provides traceability for each test to
+the required criteria). Otherwise, there are no special inputs necessary to run test cases or
+steps to restore the system.
+
+Required Ranges of Input Parameters for the Test Case(s)
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+If a test uses a range of inputs, it will be documented in the TTM in
+:numref:`Section %s <ref_armi_test_trace_matrix>`. Otherwise, there are no required ranges of
+inputs for the test case.
+
+Expected Results for the Test Case(s)
+"""""""""""""""""""""""""""""""""""""
+
+If a test expects a specific result, it will be documented in the TTM in
+:numref:`Section %s <ref_armi_test_trace_matrix>`. Otherwise, the expected test result is that no
+error is raised, which constitutes a passing test.
+
+Acceptance Criteria for the Test Case(s)
+""""""""""""""""""""""""""""""""""""""""
+
+The acceptance criteria for each test case will be described. In cases where the SRSD requirement
+acceptance criteria are suitable as the test case acceptance criteria, the SRSD requirement
+acceptance criteria are referenced by default.
+
+.. _ref_armi_record_criteria:
+
+Test Record Criteria
+^^^^^^^^^^^^^^^^^^^^
+
+The default values for the remaining 12 criteria pertaining to the test record are given in this
+section. A test record will be produced after the test is run, containing pertinent information
+about the execution of the test. This test record will be saved as part of the software test
+report (STR).
+
+Software Tested, Including System Software Used and All Versions
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+The ARMI version will be shown in the test record via standard output logs.
+
+Compute Platform and Hardware Used
+""""""""""""""""""""""""""""""""""
+
+The test record will reference the environment upon which the test is run. See
+:numref:`Section %s <ref_armi_hardware_integration>` for acceptable test environments.
+
+Test Equipment and Calibrations
+"""""""""""""""""""""""""""""""
+
+Not applicable for the ``ARMI`` software.
+
+.. _ref_armi_run_env:
+
+Runtime Environment, Including System Software and Language-Specific Environments
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+The runtime environment, including the operating system, hardware, and software configuration,
+will be specified in the test report. If necessary, more detail will be provided for individual
+tests which utilize custom runtime environments or have dependencies such as custom compiler
+options.
+
+Date of Test
+""""""""""""
+
+The date of the test execution is recorded in the output of the test.
+
+Tester or Data Recorder
+"""""""""""""""""""""""
+
+Automation will be created to run the acceptance tests.
+
+Simulation Models Used
+""""""""""""""""""""""
+
+If simulation models beyond what is described elsewhere in the documentation (SRSD, SDID, or STR)
+are used, they will be documented in the test record. Otherwise, this test record criterion is not
+applicable to the test.
+
+Test Problems Identified During Test Planning
+"""""""""""""""""""""""""""""""""""""""""""""
+
+If specific problems, such as textbook or benchmark cases, are utilized for the test, the test
+record will reference those problems. Otherwise, test problems are not applicable to the test
+record.
+
+All Input Data and Output Results and Applicability
+"""""""""""""""""""""""""""""""""""""""""""""""""""
+
+The input data will be recorded per :numref:`Section %s <ref_armi_input_data_requirements>`.
+Output data will be provided as a pass or fail of the test as part of the test record.
+
+Action Taken in Connection with Any Deviations Noted
+""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+No actions are assumed to be taken based on the test other than recording a pass or fail. If
+there are exceptions to this statement, they will be noted in the TTM in
+:numref:`Section %s <ref_armi_test_trace_matrix>`.
+
+Person Evaluating Test Result
+"""""""""""""""""""""""""""""
+
+The reviewer of the document will evaluate the test results. Any failing unit test should result
+in a release failure.
+
+Acceptability
+"""""""""""""
+
+The test record states whether the tests pass or fail.
+
+
+.. _ref_armi_test_trace_matrix:
+
+Test Traceability Matrix
+------------------------
+
+The requirements, and the associated tests which demonstrate that the codebase meets those
+requirements, are in the :ref:`SRSD <ref_armi_srsd>`. This section contains a list of all tests
+and provides information for any non-default criteria (see
+:numref:`Section %s <ref_armi_default_test_criteria>` for the default criteria).
+
+Here are some quick metrics for the requirement tests in ARMI:
+
+* :need_count:`type=='req' and status=='accepted'` Accepted Requirements in ARMI
+
+  * :need_count:`type=='req' and status=='accepted' and len(tests_back)>0` Accepted Requirements with tests
+  * :need_count:`type=='test' and id.startswith('T_ARMI')` tests linked to Requirements
+
+And here is a full listing of all the tests in ARMI that are tied to requirements:
+
+.. needextract::
+   :types: test
+   :filter: id.startswith('T_ARMI_')
+
+
+Test Results Report
+-------------------
+
+This section provides the results of the test case runs for this release of the ARMI software.
+
+.. _ref_armi_test_env:
+
+Testing Environment
+^^^^^^^^^^^^^^^^^^^
+
+This section describes the relevant environment under which the tests were run, as required by
+:numref:`Section %s <ref_armi_run_env>`. Note that individual test records have the option to
+define additional environment information.
+
+System Information
+""""""""""""""""""
+
+The logged operating system and processor information documents the environment in which the
+software was tested:
+
+.. literalinclude:: ../system_info.log
+
+Python Version and Packages
++++++++++++++++++++++++++++
+
+.. literalinclude:: ../python_details.log
+
+.. _ref_armi_software_date:
+
+Software Tested and Date
+""""""""""""""""""""""""
+
+The software tested and the date of testing are below:
+
+.. literalinclude:: ../python_details.log
+   :lines: 1-2
+
+Record of Test Cases
+^^^^^^^^^^^^^^^^^^^^
+
+This section includes the resulting test record for each test, which, together with
+:numref:`Section %s <ref_armi_test_env>`, satisfies the criteria necessary for the creation of
+the test record defined in :numref:`Section %s <ref_armi_record_criteria>`.
 
 .. test-results:: ../test_results.xml
 
-TODO
+
+Appendix A: Pytest Verbose Output
+---------------------------------
+
+Below is the verbose output of the pytest run for ``armi``.
+
+.. literalinclude:: ../pytest_verbose.log
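
Note on the test-result plumbing in this patch: the CI step writes test_results.xml via pytest's
--junit-xml flag, and the getTestResult() helper removed from doc/conf.py consumed TEST_RESULTS
records keyed by "class", "method", and "result". The following is a minimal sketch of how such
records could be loaded from that junit XML; the record shape mirrors the removed code, but the
loader itself (name, default path) is an illustrative assumption, not ARMI's actual implementation.

    # Sketch: load per-test results from the junit XML written by
    # "pytest --junit-xml=test_results.xml". The {"class", "method", "result"}
    # record shape mirrors the TEST_RESULTS entries the removed getTestResult()
    # consumed; this loader is illustrative only.
    import xml.etree.ElementTree as ET

    def loadTestResults(xmlPath="test_results.xml"):
        """Parse a pytest junit-xml report into a list of result records."""
        results = []
        for case in ET.parse(xmlPath).getroot().iter("testcase"):
            # junit-xml marks non-passing outcomes with child elements
            if case.find("failure") is not None or case.find("error") is not None:
                result = "failure"
            elif case.find("skipped") is not None:
                result = "skipped"
            else:
                result = "passed"
            results.append(
                {
                    "class": case.get("classname", ""),  # dotted, e.g. armi.tests.test_x.TestX
                    "method": case.get("name", ""),
                    "result": result,
                }
            )
        return results

With records loaded this way, TEST_RESULTS = loadTestResults() would feed the same
pass/skipped/failure roll-up logic shown in the removed getTestResult() above.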