diff --git a/README.md b/README.md index 866d9af..3f8ca41 100644 --- a/README.md +++ b/README.md @@ -2,9 +2,9 @@ [![Coverage][badge-coverage]][badge-url-coverage] [![Python 3.6][badge-python]](https://www.python.org) -# TES Compliance Suite +# API Compliance Suite -The TES Compliance Suite determines a server's compliance with the [TES API specification][res-tes-spec]. The specification has been developed by the [Global Alliance for Genomics and Health][res-ga4gh], an international coalition, formed to enable the sharing of genomic and clinical data. It serves to provide a standardized API framework and data structure to allow for interoperability of datasets hosted at different institutions. +The API Compliance Suite determines a server's compliance with the [TES API specification][res-tes-spec]. The specification has been developed by the [Global Alliance for Genomics and Health][res-ga4gh], an international coalition, formed to enable the sharing of genomic and clinical data. It serves to provide a standardized API framework and data structure to allow for interoperability of datasets hosted at different institutions. 
## Description diff --git a/compliance_suite/cli.py b/compliance_suite/cli.py index 1624994..302543f 100644 --- a/compliance_suite/cli.py +++ b/compliance_suite/cli.py @@ -16,6 +16,7 @@ from compliance_suite.functions.log import logger from compliance_suite.job_runner import JobRunner from compliance_suite.report_server import ReportServer +from compliance_suite.suite_validator import SuiteValidator @click.group() @@ -43,18 +44,45 @@ def validate_regex(ctx: Any, param: Any, value: List[str]): raise click.BadParameter("Only letters (a-z, A-Z), digits (0-9) and underscores (_) are allowed.") -@main.command(help='Run TES compliance tests against the servers') +# def update_path(ctx: Any, param: Any, value: List[str]): +# """Update the test path wrt GitHub workspace +# +# Args: +# ctx: The current click context +# param: The click parameter +# value: The value to validate +# +# Returns: +# The updated value with correct file path inside GitHub workspace +# """ +# +# modified_value = ["tmp/testdir/" + path for path in value] +# return modified_value + + +@main.command(help='Validate compliance tests') +def validate() -> None: + """Validate the test suite""" + + logger.info("Initiate test suite validation") + SuiteValidator.validate() + logger.info("Test suite validation finished") + + +@main.command(help='Run API compliance tests against the servers') @click.option('--server', '-s', required=True, type=str, prompt="Enter server", help='server URL on which the compliance tests are run. Format - https:///') @click.option('--version', '-v', required=True, type=str, prompt="Enter version", - help='TES version. Example - "1.0.0"') + help='API version. 
Example - "1.0.0"') @click.option('--include-tags', '-i', 'include_tags', multiple=True, help='run tests for provided tags', callback=validate_regex) @click.option('--exclude-tags', '-e', 'exclude_tags', multiple=True, help='skip tests for provided tags', callback=validate_regex) @click.option('--test-path', '-tp', 'test_path', multiple=True, help='the absolute or relative path of the tests to be run', default=["tests"]) -@click.option('--output_path', '-o', help='path to output the JSON report') +# @click.option('--test-path', '-tp', 'test_path', multiple=True, +# help='the absolute or relative path of the tests to be run', default=["tests"], callback=update_path) +@click.option('--output_path', '-o', help='the absolute directory path to store the JSON report') @click.option('--serve', default=False, is_flag=True, help='spin up a server') @click.option('--port', default=15800, help='port at which the compliance report is served') @click.option('--uptime', '-u', default=3600, help='time that server will remain up in seconds') @@ -72,12 +100,12 @@ def report(server: str, Args: server (str): The server URL on which the compliance suite will be run. Format - https:/// - version (str): The compliance suite will be run against this TES version. Example - "1.0.0" + version (str): The compliance suite will be run against this API version. Example - "1.0.0" include_tags (List[str]): The list of the tags for which the compliance suite will be run. exclude_tags (List[str]): The list of the tags for which the compliance suite will not be run. test_path: The list of absolute or relative paths from the project root of the test file/directory. Default - ["tests"] - output_path (str): The output path to store the JSON compliance report + output_path (str): The absolute directory path to store the JSON compliance report serve (bool): If true, runs a local server and displays the JSON report in webview port (int): Set the local server port. 
Default - 16800 uptime (int): The local server duration in seconds. Default - 3600 seconds @@ -108,11 +136,11 @@ def report(server: str, output.write(json_report) # Writing a report copy to web dir for local server - with open(os.path.join(os.getcwd(), "compliance_suite", "web", "web_report.json"), "w+") as output: + with open(os.path.join(os.getcwd(), "web_report.json"), "w+") as output: output.write(json_report) if serve is True: - report_server = ReportServer(os.path.join(os.getcwd(), "compliance_suite", "web")) + report_server = ReportServer(os.getcwd()) report_server.serve_thread(port, uptime) diff --git a/compliance_suite/constants/constants.py b/compliance_suite/constants/constants.py index d1b4111..fbc17e5 100644 --- a/compliance_suite/constants/constants.py +++ b/compliance_suite/constants/constants.py @@ -11,29 +11,6 @@ 'SUMMARY': 45 } -# API Constants -# 1. Basic & Full views have same required fields. Hence, validating Basic views against Full view Model. - -ENDPOINT_TO_MODEL = { - 'service_info': 'TesServiceInfo', - 'list_tasks_MINIMAL': 'TesListTasksResponseMinimal', - 'list_tasks_BASIC': 'TesListTasksResponse', - 'list_tasks_FULL': 'TesListTasksResponse', - 'get_task_MINIMAL': 'TesTaskMinimal', - 'get_task_BASIC': 'TesTask', - 'get_task_FULL': 'TesTask', - 'create_task': 'TesCreateTaskResponse', - 'create_task_request_body': 'TesTask', - 'cancel_task': 'TesCancelTaskResponse' -} - -REQUEST_HEADERS = { - 'TES': { - 'Accept': 'application/json', - 'Content-Type': 'application/json' - } -} - # String Constants PATTERN_HASH_CENTERED = "{:#^120}" diff --git a/compliance_suite/exceptions/compliance_exception.py b/compliance_suite/exceptions/compliance_exception.py index 416418a..0e7c936 100644 --- a/compliance_suite/exceptions/compliance_exception.py +++ b/compliance_suite/exceptions/compliance_exception.py @@ -29,9 +29,8 @@ def __init__(self, name: str, message: str, details: Any, _type: str): class TestFailureException(BasicException): - """ When a test 
fails due to incomplete/wrong implementation of a TES server. The exception - highlights the possible changes required by the TES server to follow the standard - TES API Specs""" + """ When a test fails due to incomplete/wrong implementation of an API server. The exception + highlights the possible changes required by the API server to follow the standard API Specs""" def __init__(self, name: str, message: str, details: Any): """Initialize Test Failure Exception object diff --git a/compliance_suite/functions/client.py b/compliance_suite/functions/client.py index 862fee7..f49f8b5 100644 --- a/compliance_suite/functions/client.py +++ b/compliance_suite/functions/client.py @@ -13,7 +13,6 @@ import requests from requests.models import Response -from compliance_suite.constants.constants import REQUEST_HEADERS from compliance_suite.exceptions.compliance_exception import ( TestFailureException, TestRunnerException @@ -28,10 +27,17 @@ def __init__(self): """ Initialize the Client object""" self.check_cancel = False # Checks if the Cancel status is to be validated or not + self.request_headers: Dict = {} + + def set_request_headers(self, request_headers) -> None: + """ Set the request headers + Args: + request_headers: The request headers extracted from Tests repo API config + """ + self.request_headers = request_headers def send_request( self, - service: str, server: str, version: str, endpoint: str, @@ -43,7 +49,6 @@ def send_request( """ Sends the REST request to provided server Args: - service (str): The GA4GH service name (eg. TES) server (str): The server URL to send the request version (str): The version of the deployed server endpoint (str): The endpoint of the given server @@ -61,15 +66,14 @@ def send_request( version = "v" + version.split(".")[0] # Convert SemVer into Major API version base_url: str = str(server) + version + endpoint - request_headers: dict = REQUEST_HEADERS[service] response = None logger.info(f"Sending {operation} request to {base_url}. 
Query Parameters - {query_params}") try: if operation == "GET": - response = requests.get(base_url, headers=request_headers, params=query_params) + response = requests.get(base_url, headers=self.request_headers, params=query_params) elif operation == "POST": request_body = json.loads(request_body) - response = requests.post(base_url, headers=request_headers, json=request_body) + response = requests.post(base_url, headers=self.request_headers, json=request_body) return response except OSError as err: raise TestRunnerException(name="OS Error", @@ -92,7 +96,7 @@ def check_poll( if response.status_code != 200: logger.info("Unexpected response from Polling request. Retrying...") return False - + # TODO response_json: Any = response.json() valid_states = ["CANCELED", "CANCELING"] if self.check_cancel else ["COMPLETE", "EXECUTOR_ERROR", "SYSTEM_ERROR", "PREEMPTED"] @@ -105,7 +109,6 @@ def check_poll( def poll_request( self, - service: str, server: str, version: str, endpoint: str, @@ -119,7 +122,6 @@ def poll_request( """ This function polls a request to specified server with given interval and timeout Args: - service (str): The GA4GH service name (eg. TES) server (str): The server URL to send the request version (str): The version of the deployed server endpoint (str): The endpoint of the given server @@ -140,12 +142,11 @@ def poll_request( self.check_cancel = check_cancel_val version = "v" + version.split(".")[0] # Convert SemVer into Major API version base_url: str = str(server) + version + endpoint - request_headers: dict = REQUEST_HEADERS[service] logger.info(f"Sending {operation} polling request to {base_url}. 
Query Parameters - {query_params}") try: - response = polling2.poll(lambda: requests.get(base_url, headers=request_headers, params=query_params), + response = polling2.poll(lambda: requests.get(base_url, headers=self.request_headers, params=query_params), step=polling_interval, timeout=polling_timeout, check_success=self.check_poll) return response diff --git a/compliance_suite/functions/report.py b/compliance_suite/functions/report.py index bf421b5..2bdf1d4 100644 --- a/compliance_suite/functions/report.py +++ b/compliance_suite/functions/report.py @@ -22,9 +22,9 @@ def __init__(self): def initialize_report(self) -> None: """Set Testbed Report details""" - self.report.set_testbed_name("TES Compliance Suite") + self.report.set_testbed_name("API Compliance Suite") self.report.set_testbed_version("0.1.0") - self.report.set_testbed_description("TES Compliance Suite tests the platform against the GA4GH TES API " + self.report.set_testbed_description("The compliance suite tests the platform against the GA4GH TES API " "specs. 
Its an automated tool system testing against YAML-based test " "files along with the ability to validate cloud service/functionality.") @@ -37,7 +37,7 @@ def set_platform_details(self, platform_server: str) -> None: self.platform_name = platform_server self.report.set_platform_name(platform_server) - self.report.set_platform_description(f"TES service deployed on the {platform_server}") + self.report.set_platform_description(f"API service deployed on the {platform_server}") def add_phase(self, filename: str, description: str) -> Any: """Add a phase which is individual YAML test file diff --git a/compliance_suite/job_runner.py b/compliance_suite/job_runner.py index 372de9f..b5e7f92 100644 --- a/compliance_suite/job_runner.py +++ b/compliance_suite/job_runner.py @@ -11,12 +11,7 @@ List ) -from jsonschema import ( - RefResolver, - validate, - ValidationError -) -import yaml +from ga4gh.testbed.report.test import Test from compliance_suite.constants.constants import ( PATTERN_HASH_CENTERED, @@ -36,10 +31,10 @@ ) from compliance_suite.test_runner import TestRunner from compliance_suite.utils.test_utils import ( + load_and_validate_yaml_data, replace_string, tag_matcher ) -from ga4gh.testbed.report.test import Test class JobRunner: @@ -50,7 +45,7 @@ def __init__(self, server: str, version: str): Args: server (str): The server URL on which the compliance suite will be run - version (str): The compliance suite will be run against this TES version + version (str): The compliance suite will be run against this API version """ self.server: str = server @@ -115,46 +110,6 @@ def generate_summary(self) -> None: logger.summary("", PATTERN_HASH_CENTERED) logger.summary("\n\n\n") - def load_and_validate_yaml_data(self, yaml_file: str, _type: str): - """ - Load and validate YAML data from the file with the provided schema type. - - Args: - yaml_file: The path to the YAML file. - _type: The type of YAML file, either "Test" or "Template". 
- - Returns: - The loaded and validated YAML data. - """ - - # Load YAML data - try: - yaml_data = yaml.safe_load(open(yaml_file, "r")) - except yaml.YAMLError as err: - raise JobValidationException(name="YAML Error", - message=f"Invalid YAML file {yaml_file}", - details=err) - - # Validate YAML data with schema - schema_dir_path = Path("docs/test_config").absolute() - test_schema_path = Path("docs/test_config/test_schema.json") - template_schema_path = Path("docs/test_config/template_schema.json") - schema_file_path = str(test_schema_path if _type == TEST else template_schema_path) - json_schema = yaml.safe_load(open(schema_file_path, "r")) - - try: - # Python-jsonschema does not reference local files directly - # Refer solution from https://github.com/python-jsonschema/jsonschema/issues/98#issuecomment-105475109 - resolver = RefResolver('file:///' + str(schema_dir_path).replace("\\", "/") + '/', None) - validate(yaml_data, json_schema, resolver=resolver) - logger.info(f'YAML file valid for {_type}: {yaml_file}') - except ValidationError as err: - raise JobValidationException(name="YAML Schema Validation Error", - message=f"YAML file {yaml_file} does not match the {_type} schema", - details=err.message) - - return yaml_data - def generate_report(self) -> Any: """Generates the report via ga4gh-testbed-lib and returns it @@ -178,16 +133,16 @@ def initialize_test(self, yaml_file: Path) -> None: logger.summary(f" Initiating Test-{self.test_count} for {yaml_file} ", PATTERN_HASH_CENTERED) try: - yaml_data = self.load_and_validate_yaml_data(str(yaml_file), TEST) + yaml_data = load_and_validate_yaml_data(str(yaml_file), TEST) report_phase = self.report.add_phase(str(yaml_file), yaml_data["description"]) if (self.version in yaml_data["versions"] and tag_matcher(self.include_tags, self.exclude_tags, yaml_data["tags"])): - test_runner = TestRunner(yaml_data["service"], self.server, self.version) + test_runner = TestRunner(self.server, self.version) job_list: List[Dict] = [] 
for job in yaml_data["jobs"]: if "$ref" in job: - template_data = self.load_and_validate_yaml_data(job["$ref"], TEMPLATE) + template_data = load_and_validate_yaml_data(job["$ref"], TEMPLATE) if "args" in job: for key, value in job["args"].items(): template_data = replace_string(template_data, f"{{{key}}}", value) @@ -233,7 +188,7 @@ def run_jobs(self) -> None: if search_path.is_file() and search_path.match("*.yml"): self.initialize_test(search_path) elif search_path.is_dir(): - for yaml_file in search_path.glob("**/*.yml"): + for yaml_file in sorted(search_path.glob("**/*.yml")): self.initialize_test(yaml_file) self.generate_summary() diff --git a/compliance_suite/models/__init__.py b/compliance_suite/models/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/compliance_suite/models/v1_0_0_specs.py b/compliance_suite/models/v1_0_0_specs.py deleted file mode 100644 index 80193ea..0000000 --- a/compliance_suite/models/v1_0_0_specs.py +++ /dev/null @@ -1,447 +0,0 @@ -"""Module compliance_suite.models.v1_0_0_specs.py - -Pydantic generated models for TES API Specs v1.0.0 -""" - -from __future__ import annotations - -from datetime import datetime -from enum import Enum -from typing import Dict, List, Optional - -from pydantic import AnyUrl, BaseModel, Field - - -class TesCancelTaskResponse(BaseModel): - pass - - -class TesCreateTaskResponse(BaseModel): - id: str = Field(..., description='Task identifier assigned by the server.') - - -class TesExecutor(BaseModel): - image: str = Field( - ..., - description='Name of the container image. The string will be passed as the image\nargument to the ' - 'containerization run command. Examples:\n - `ubuntu`\n - `quay.io/aptible/ubuntu`\n - ' - '`gcr.io/my-org/my-image`\n - `myregistryhost:5000/fedora/httpd:version1.0`', - example='ubuntu:20.04', - ) - command: List[str] = Field( - ..., - description='A sequence of program arguments to execute, where the first argument\nis the program to ' - 'execute (i.e. 
argv). Example:\n```\n{\n "command" : ["/bin/md5", "/data/file1"]\n}\n```', - example=['/bin/md5', '/data/file1'], - ) - workdir: Optional[str] = Field( - None, - description='The working directory that the command will be executed in.\nIf not defined, the system will ' - 'default to the directory set by\nthe container image.', - example='/data/', - ) - stdin: Optional[str] = Field( - None, - description='Path inside the container to a file which will be piped\nto the executor\'s stdin. This must be ' - 'an absolute path. This mechanism\ncould be used in conjunction with the input declaration to ' - 'process\na data file using a tool that expects STDIN.\n\nFor example, to get the MD5 sum of a ' - 'file by reading it into the STDIN\n```\n{\n "command" : ["/bin/md5"],\n ' - '"stdin" : "/data/file1"\n}\n```', - example='/data/file1', - ) - stdout: Optional[str] = Field( - None, - description='Path inside the container to a file where the executor\'s\nstdout will be written to. ' - 'Must be an absolute path. Example:\n```\n{\n "stdout" : "/tmp/stdout.log"\n}\n```', - example='/tmp/stdout.log', - ) - stderr: Optional[str] = Field( - None, - description='Path inside the container to a file where the executor\'s\nstderr will be written to. Must be ' - 'an absolute path. Example:\n```\n{\n "stderr" : "/tmp/stderr.log"\n}\n```', - example='/tmp/stderr.log', - ) - env: Optional[Dict[str, str]] = Field( - None, - description='Enviromental variables to set within the container. 
Example:\n```\n{\n "env" : {\n ' - '"ENV_CONFIG_PATH" : "/data/config.file",\n "BLASTDB" : "/data/GRC38",\n ' - '"HMMERDB" : "/data/hmmer"\n }\n}\n```', - example={'BLASTDB': '/data/GRC38', 'HMMERDB': '/data/hmmer'}, - ) - - -class TesExecutorLog(BaseModel): - start_time: Optional[str] = Field( - None, - description='Time the executor started, in RFC 3339 format.', - example='2020-10-02T15:00:00.000Z', - ) - end_time: Optional[str] = Field( - None, - description='Time the executor ended, in RFC 3339 format.', - example='2020-10-02T16:00:00.000Z', - ) - stdout: Optional[str] = Field( - None, - description='Stdout content.\n\nThis is meant for convenience. No guarantees are made about the ' - 'content.\nImplementations may chose different approaches: only the head, only the tail,\na URL ' - 'reference only, etc.\n\nIn order to capture the full stdout client should set ' - 'Executor.stdout\nto a container file path, and use Task.outputs to upload that file\nto ' - 'permanent storage.', - ) - stderr: Optional[str] = Field( - None, - description='Stderr content.\n\nThis is meant for convenience. 
No guarantees are made about the ' - 'content.\nImplementations may chose different approaches: only the head, only the tail,\na URL ' - 'reference only, etc.\n\nIn order to capture the full stderr client should set ' - 'Executor.stderr\nto a container file path, and use Task.outputs to upload that file\nto ' - 'permanent storage.', - ) - exit_code: int = Field(..., description='Exit code.') - - -class TesFileType(Enum): - FILE = 'FILE' - DIRECTORY = 'DIRECTORY' - - -class TesInput(BaseModel): - name: Optional[str] = None - description: Optional[str] = None - url: Optional[str] = Field( - None, - description='REQUIRED, unless "content" is set.\n\nURL in long term storage, for example:\n - ' - 's3://my-object-store/file1\n - gs://my-bucket/file2\n - file:///path/to/my/file\n - ' - '/path/to/my/file', - example='s3://my-object-store/file1', - ) - path: str = Field( - ..., - description='Path of the file inside the container.\nMust be an absolute path.', - example='/data/file1', - ) - type: TesFileType - content: Optional[str] = Field( - None, - description='File content literal.\n\nImplementations should support a minimum of 128 KiB in this ' - 'field\nand may define their own maximum.\n\nUTF-8 encoded\n\nIf content is not empty, ' - '"url" must be ignored.', - ) - - -class TesOutput(BaseModel): - name: Optional[str] = Field(None, description='User-provided name of output file') - description: Optional[str] = Field( - None, - description='Optional users provided description field, can be used for documentation.', - ) - url: str = Field( - ..., - description='URL for the file to be copied by the TES server after the task is complete.\nFor ' - 'Example:\n - `s3://my-object-store/file1`\n - `gs://my-bucket/file2`\n - ' - '`file:///path/to/my/file`', - ) - path: str = Field( - ..., - description='Path of the file inside the container.\nMust be an absolute path.', - ) - type: TesFileType - - -class TesOutputFileLog(BaseModel): - url: str = Field( - ..., description='URL 
of the file in storage, e.g. s3://bucket/file.txt' - ) - path: str = Field( - ..., - description='Path of the file inside the container. Must be an absolute path.', - ) - size_bytes: str = Field( - ..., - description="Size of the file in bytes. Note, this is currently coded as a string\nbecause official " - "JSON doesn't support int64 numbers.", - example=['1024'], - ) - - -class TesResources(BaseModel): - cpu_cores: Optional[int] = Field( - None, description='Requested number of CPUs', example=4 - ) - preemptible: Optional[bool] = Field( - None, - description="Define if the task is allowed to run on preemptible compute instances,\nfor example, " - "AWS Spot. This option may have no effect when utilized\non some backends that don't have " - "the concept of preemptible jobs.", - example=False, - ) - ram_gb: Optional[float] = Field( - None, description='Requested RAM required in gigabytes (GB)', example=8 - ) - disk_gb: Optional[float] = Field( - None, description='Requested disk size in gigabytes (GB)', example=40 - ) - zones: Optional[List[str]] = Field( - None, - description='Request that the task be run in these compute zones. How this string\nis utilized ' - 'will be dependent on the backend system. 
For example, a\nsystem based on a cluster ' - 'queueing system may use this string to define\npriorty queue to which the job is assigned.', - example='us-west-1', - ) - - -class Artifact(Enum): - tes = 'tes' - - -class TesState(Enum): - UNKNOWN = 'UNKNOWN' - QUEUED = 'QUEUED' - INITIALIZING = 'INITIALIZING' - RUNNING = 'RUNNING' - PAUSED = 'PAUSED' - COMPLETE = 'COMPLETE' - EXECUTOR_ERROR = 'EXECUTOR_ERROR' - SYSTEM_ERROR = 'SYSTEM_ERROR' - CANCELED = 'CANCELED' - - -class TesTaskLog(BaseModel): - logs: List[TesExecutorLog] = Field(..., description='Logs for each executor') - metadata: Optional[Dict[str, str]] = Field( - None, - description='Arbitrary logging metadata included by the implementation.', - example={'host': 'worker-001', 'slurmm_id': 123456}, - ) - start_time: Optional[str] = Field( - None, - description='When the task started, in RFC 3339 format.', - example='2020-10-02T15:00:00.000Z', - ) - end_time: Optional[str] = Field( - None, - description='When the task ended, in RFC 3339 format.', - example='2020-10-02T16:00:00.000Z', - ) - outputs: List[TesOutputFileLog] = Field( - ..., - description='Information about all output files. Directory outputs are\nflattened into separate items.', - ) - system_logs: Optional[List[str]] = Field( - None, - description='System logs are any logs the system decides are relevant,\nwhich are not tied directly ' - 'to an Executor process.\nContent is implementation specific: format, size, etc.\n\nSystem ' - 'logs may be collected here to provide convenient access.\n\nFor example, the system may ' - 'include the name of the host\nwhere the task is executing, an error message ' - 'that caused\na SYSTEM_ERROR state (e.g. disk is full), etc.\n\nSystem logs are ' - 'only included in the FULL task view.', - ) - - -class ServiceType(BaseModel): - group: str = Field( - ..., - description="Namespace in reverse domain name format. Use `org.ga4gh` for implementations compliant " - "with official GA4GH specifications. 
For services with custom APIs not standardized by " - "GA4GH, or implementations diverging from official GA4GH specifications, use a different " - "namespace (e.g. your organization's reverse domain name).", - example='org.ga4gh', - ) - artifact: str = Field( - ..., - description='Name of the API or GA4GH specification implemented. Official GA4GH types should be assigned ' - 'as part of standards approval process. Custom artifacts are supported.', - example='beacon', - ) - version: str = Field( - ..., - description='Version of the API or specification. GA4GH specifications use semantic versioning.', - example='1.0.0', - ) - - -class Organization(BaseModel): - name: str = Field( - ..., - description='Name of the organization responsible for the service', - example='My organization', - ) - url: AnyUrl = Field( - ..., - description='URL of the website of the organization (RFC 3986 format)', - example='https://example.com', - ) - - -class Service(BaseModel): - id: str = Field( - ..., - description='Unique ID of this service. Reverse domain name notation is recommended, though not required. ' - 'The identifier should attempt to be globally unique so it can be used in downstream ' - 'aggregator services e.g. Service Registry.', - example='org.ga4gh.myservice', - ) - name: str = Field( - ..., - description='Name of this service. Should be human readable.', - example='My project', - ) - type: ServiceType - description: Optional[str] = Field( - None, - description='Description of the service. Should be human readable and provide information about the service.', - example='This service provides...', - ) - organization: Organization = Field( - ..., description='Organization providing the service' - ) - contactUrl: Optional[AnyUrl] = Field( - None, - description='URL of the contact for the provider of this service, e.g. 
a link to a contact form ' - '(RFC 3986 format), or an email (RFC 2368 format).', - example='mailto:support@example.com', - ) - documentationUrl: Optional[AnyUrl] = Field( - None, - description='URL of the documentation of this service (RFC 3986 format). This should help someone ' - 'learn how to use your service, including any specifics required to access data, ' - 'e.g. authentication.', - example='https://docs.myservice.example.com', - ) - createdAt: Optional[datetime] = Field( - None, - description='Timestamp describing when the service was first deployed and available (RFC 3339 format)', - example='2019-06-04T12:58:19Z', - ) - updatedAt: Optional[datetime] = Field( - None, - description='Timestamp describing when the service was last updated (RFC 3339 format)', - example='2019-06-04T12:58:19Z', - ) - environment: Optional[str] = Field( - None, - description='Environment the service is running in. Use this to distinguish between production, ' - 'development and testing/staging deployments. Suggested values are prod, test, dev, ' - 'staging. However this is advised and not enforced.', - example='test', - ) - version: str = Field( - ..., - description='Version of the service being described. Semantic versioning is recommended, but ' - 'other identifiers, such as dates or commit hashes, are also allowed. The version should ' - 'be changed whenever the service is updated.', - example='1.0.0', - ) - - -class TesServiceType(ServiceType): - artifact: Artifact = Field(..., example='tes') - - -class TesServiceInfo(Service): - storage: Optional[List[str]] = Field( - None, - description='Lists some, but not necessarily all, storage locations supported\nby the service.', - example=[ - 'file:///path/to/local/funnel-storage', - 's3://ohsu-compbio-funnel/storage', - ], - ) - type: TesServiceType = Field(...) 
- - -class TesTask(BaseModel): - id: Optional[str] = Field( - None, - description='Task identifier assigned by the server.', - example='job-0012345', - ) - state: Optional[TesState] = None - name: Optional[str] = Field(None, description='User-provided task name.') - description: Optional[str] = Field( - None, - description='Optional user-provided description of task for documentation purposes.', - ) - inputs: Optional[List[TesInput]] = Field( - None, - description='Input files that will be used by the task. Inputs will be downloaded\nand mounted into ' - 'the executor container as defined by the task request\ndocument.', - example=[{'url': 's3://my-object-store/file1', 'path': '/data/file1'}], - ) - outputs: Optional[List[TesOutput]] = Field( - None, - description='Output files.\nOutputs will be uploaded from the executor container to long-term storage.', - example=[ - { - 'path': '/data/outfile', - 'url': 's3://my-object-store/outfile-1', - 'type': 'FILE', - } - ], - ) - resources: Optional[TesResources] = None - executors: List[TesExecutor] = Field( - ..., - description='An array of executors to be run. Each of the executors will run one\nat a time sequentially. ' - 'Each executor is a different command that\nwill be run, and each can utilize a different ' - 'docker image. But each of\nthe executors will see the same mapped inputs and volumes ' - 'that are declared\nin the parent CreateTask message.\n\nExecution stops on the first error.', - ) - volumes: Optional[List[str]] = Field( - None, - description='Volumes are directories which may be used to share data between\nExecutors. 
Volumes are ' - 'initialized as empty directories by the\nsystem when the task starts and are mounted at the ' - 'same path\nin each Executor.\n\nFor example, given a volume defined at `/vol/A`,\nexecutor 1 ' - 'may write a file to `/vol/A/exec1.out.txt`, then\nexecutor 2 may read from that ' - 'file.\n\n(Essentially, this translates to a `docker run -v` flag where\nthe container path ' - 'is the same for each executor).', - example=['/vol/A/'], - ) - tags: Optional[Dict[str, str]] = Field( - None, - description='A key-value map of arbitrary tags. These can be used to store meta-data\nand annotations ' - 'about a task. Example:\n```\n{\n "tags" : {\n "WORKFLOW_ID" : "cwl-01234",\n ' - '"PROJECT_GROUP" : "alice-lab"\n }\n}\n```', - example={'WORKFLOW_ID': 'cwl-01234', 'PROJECT_GROUP': 'alice-lab'}, - ) - logs: Optional[List[TesTaskLog]] = Field( - None, - description='Task logging information.\nNormally, this will contain only one entry, but in the case ' - 'where\na task fails and is retried, an entry will be appended to this list.', - ) - creation_time: Optional[str] = Field( - None, - description='Date + time the task was created, in RFC 3339 format.\nThis is set by the system, not the client.', - example='2020-10-02T15:00:00.000Z', - ) - - -class TesListTasksResponse(BaseModel): - tasks: List[TesTask] = Field( - ..., - description='List of tasks. These tasks will be based on the original submitted\ntask document, but with ' - 'other fields, such as the job state and\nlogging info, added/changed as the job progresses.', - ) - next_page_token: Optional[str] = Field( - None, - description='Token used to return the next page of results. 
This value can be used\nin the `page_token` ' - 'field of the next ListTasks request.', - ) - - -# Extra models manually added for Minimal View -class TesTaskMinimal(BaseModel): - id: str = Field( - ..., - description='Task identifier assigned by the server.', - example='job-0012345', - ) - state: TesState = Field(..., example='UNKNOWN') - - -class TesListTasksResponseMinimal(BaseModel): - tasks: List[TesTaskMinimal] = Field( - ..., - description='List of tasks. These tasks will be based on the original submitted\ntask document, but with ' - 'other fields, such as the job state and\nlogging info, added/changed as the job progresses.', - ) diff --git a/compliance_suite/models/v1_1_0_specs.py b/compliance_suite/models/v1_1_0_specs.py deleted file mode 100644 index 7e82633..0000000 --- a/compliance_suite/models/v1_1_0_specs.py +++ /dev/null @@ -1,498 +0,0 @@ -"""Module compliance_suite.models.v1_1_0_specs.py - -Pydantic generated models for TES API Specs v1.1.0 -""" - -from __future__ import annotations - -from datetime import datetime -from enum import Enum -from typing import Dict, List, Optional - -from pydantic import AnyUrl, BaseModel, Field - - -class TesCancelTaskResponse(BaseModel): - pass - - -class TesCreateTaskResponse(BaseModel): - id: str = Field(..., description='Task identifier assigned by the server.') - - -class TesExecutor(BaseModel): - image: str = Field( - ..., - description='Name of the container image. The string will be passed as the image\nargument to the ' - 'containerization run command. Examples:\n - `ubuntu`\n - `quay.io/aptible/ubuntu`\n - ' - '`gcr.io/my-org/my-image`\n - `myregistryhost:5000/fedora/httpd:version1.0`', - example='ubuntu:20.04', - ) - command: List[str] = Field( - ..., - description='A sequence of program arguments to execute, where the first argument\nis the program to ' - 'execute (i.e. argv). 
Example:\n```\n{\n "command" : ["/bin/md5", "/data/file1"]\n}\n```', - example=['/bin/md5', '/data/file1'], - ) - workdir: Optional[str] = Field( - None, - description='The working directory that the command will be executed in.\nIf not defined, the system will ' - 'default to the directory set by\nthe container image.', - example='/data/', - ) - stdin: Optional[str] = Field( - None, - description='Path inside the container to a file which will be piped\nto the executor\'s stdin. This must be ' - 'an absolute path. This mechanism\ncould be used in conjunction with the input declaration to ' - 'process\na data file using a tool that expects STDIN.\n\nFor example, to get the MD5 sum of a ' - 'file by reading it into the STDIN\n```\n{\n "command" : ["/bin/md5"],\n ' - '"stdin" : "/data/file1"\n}\n```', - example='/data/file1', - ) - stdout: Optional[str] = Field( - None, - description='Path inside the container to a file where the executor\'s\nstdout will be written to. ' - 'Must be an absolute path. Example:\n```\n{\n "stdout" : "/tmp/stdout.log"\n}\n```', - example='/tmp/stdout.log', - ) - stderr: Optional[str] = Field( - None, - description='Path inside the container to a file where the executor\'s\nstderr will be written to. Must be ' - 'an absolute path. Example:\n```\n{\n "stderr" : "/tmp/stderr.log"\n}\n```', - example='/tmp/stderr.log', - ) - env: Optional[Dict[str, str]] = Field( - None, - description='Enviromental variables to set within the container. Example:\n```\n{\n "env" : {\n ' - '"ENV_CONFIG_PATH" : "/data/config.file",\n "BLASTDB" : "/data/GRC38",\n ' - '"HMMERDB" : "/data/hmmer"\n }\n}\n```', - example={'BLASTDB': '/data/GRC38', 'HMMERDB': '/data/hmmer'}, - ) - ignore_error: Optional[bool] = Field( - None, - description='Default behavior of running an array of executors is that execution\nstops on the first error. 
' - 'If `ignore_error` is `True`, then the\nrunner will record error exit codes, but will continue on ' - 'to the next\ntesExecutor.', - ) - - -class TesExecutorLog(BaseModel): - start_time: Optional[str] = Field( - None, - description='Time the executor started, in RFC 3339 format.', - example='2020-10-02T10:00:00-05:00', - ) - end_time: Optional[str] = Field( - None, - description='Time the executor ended, in RFC 3339 format.', - example='2020-10-02T11:00:00-05:00', - ) - stdout: Optional[str] = Field( - None, - description='Stdout content.\n\nThis is meant for convenience. No guarantees are made about the ' - 'content.\nImplementations may chose different approaches: only the head, only the tail,\na URL ' - 'reference only, etc.\n\nIn order to capture the full stdout client should set ' - 'Executor.stdout\nto a container file path, and use Task.outputs to upload that file\nto ' - 'permanent storage.', - ) - stderr: Optional[str] = Field( - None, - description='Stderr content.\n\nThis is meant for convenience. 
No guarantees are made about the ' - 'content.\nImplementations may chose different approaches: only the head, only the tail,\na URL ' - 'reference only, etc.\n\nIn order to capture the full stderr client should set ' - 'Executor.stderr\nto a container file path, and use Task.outputs to upload that file\nto ' - 'permanent storage.', - ) - exit_code: int = Field(..., description='Exit code.') - - -class TesFileType(Enum): - FILE = 'FILE' - DIRECTORY = 'DIRECTORY' - - -class TesInput(BaseModel): - name: Optional[str] = None - description: Optional[str] = None - url: Optional[str] = Field( - None, - description='REQUIRED, unless "content" is set.\n\nURL in long term storage, for example:\n - ' - 's3://my-object-store/file1\n - gs://my-bucket/file2\n - file:///path/to/my/file\n - ' - '/path/to/my/file', - example='s3://my-object-store/file1', - ) - path: str = Field( - ..., - description='Path of the file inside the container.\nMust be an absolute path.', - example='/data/file1', - ) - type: Optional[TesFileType] = None - content: Optional[str] = Field( - None, - description='File content literal.\n\nImplementations should support a minimum of 128 KiB in this ' - 'field\nand may define their own maximum.\n\nUTF-8 encoded\n\nIf content is not empty, ' - '"url" must be ignored.', - ) - streamable: Optional[bool] = Field( - None, - description='Indicate that a file resource could be accessed using a streaming\ninterface, ie a FUSE mounted ' - 's3 object. This flag indicates that\nusing a streaming mount, as opposed to downloading the whole ' - 'file to\nthe local scratch space, may be faster despite the latency and\noverhead. 
This does not ' - 'mean that the backend will use a streaming\ninterface, as it may not be provided by the vendor, ' - 'but if the\ncapacity is avalible it can be used without degrading the\nperformance of the ' - 'underlying program.', - ) - - -class TesOutput(BaseModel): - name: Optional[str] = Field(None, description='User-provided name of output file') - description: Optional[str] = Field( - None, - description='Optional users provided description field, can be used for documentation.', - ) - url: str = Field( - ..., - description='URL at which the TES server makes the output accessible after the task is complete.\nWhen ' - 'tesOutput.path contains wildcards, it must be a directory; see\n`tesOutput.path_prefix` for ' - 'details on how output URLs are constructed in this case.\nFor Example:\n - ' - '`s3://my-object-store/file1`\n - `gs://my-bucket/file2`\n - `file:///path/to/my/file`', - ) - path: str = Field( - ..., - description='Absolute path of the file inside the container.\nMay contain pattern matching wildcards to select ' - 'multiple outputs at once, but mind\nimplications for `tesOutput.url` and `tesOutput.path_prefix`.' - '\nOnly wildcards defined in IEEE Std 1003.1-2017 (POSIX), 12.3 are supported; ' - 'see\nhttps://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_13', - ) - path_prefix: Optional[str] = Field( - None, - description='Prefix to be removed from matching outputs if `tesOutput.path` contains wildcards;\noutput URLs ' - 'are constructed by appending pruned paths to the directory specfied\nin ' - '`tesOutput.url`.\nRequired if `tesOutput.path` contains wildcards, ignored otherwise.', - ) - type: Optional[TesFileType] = None - - -class TesOutputFileLog(BaseModel): - url: str = Field( - ..., description='URL of the file in storage, e.g. s3://bucket/file.txt' - ) - path: str = Field( - ..., - description='Path of the file inside the container. 
Must be an absolute path.', - ) - size_bytes: str = Field( - ..., - description="Size of the file in bytes. Note, this is currently coded as a string\nbecause official " - "JSON doesn't support int64 numbers.", - example=['1024'], - ) - - -class TesResources(BaseModel): - cpu_cores: Optional[int] = Field( - None, description='Requested number of CPUs', example=4 - ) - preemptible: Optional[bool] = Field( - None, - description="Define if the task is allowed to run on preemptible compute instances,\nfor example, " - "AWS Spot. This option may have no effect when utilized\non some backends that don't have " - "the concept of preemptible jobs.", - example=False, - ) - ram_gb: Optional[float] = Field( - None, description='Requested RAM required in gigabytes (GB)', example=8 - ) - disk_gb: Optional[float] = Field( - None, description='Requested disk size in gigabytes (GB)', example=40 - ) - zones: Optional[List[str]] = Field( - None, - description='Request that the task be run in these compute zones. How this string\nis utilized ' - 'will be dependent on the backend system. 
For example, a\nsystem based on a cluster ' - 'queueing system may use this string to define\npriorty queue to which the job is assigned.', - example='us-west-1', - ) - backend_parameters: Optional[Dict[str, str]] = Field( - None, - description='Key/value pairs for backend configuration.\nServiceInfo shall return a list of keys that a ' - 'backend supports.\nKeys are case insensitive.\nIt is expected that clients pass all runtime ' - 'or hardware requirement key/values\nthat are not mapped to existing tesResources properties ' - 'to backend_parameters.\nBackends shall log system warnings if a key is passed that is ' - 'unsupported.\nBackends shall not store or return unsupported keys if included in a task.\nIf ' - 'backend_parameters_strict equals true,\nbackends should fail the task if any key/values are ' - 'unsupported, otherwise,\nbackends should attempt to run the task\nIntended uses include VM size ' - 'selection, coprocessor configuration, etc.\nExample:\n```\n{\n "backend_parameters" : ' - '{\n "VmSize" : "Standard_D64_v3"\n }\n}\n```', - example={'VmSize': 'Standard_D64_v3'}, - ) - backend_parameters_strict: Optional[bool] = Field( - False, - description='If set to true, backends should fail the task if any backend_parameters\nkey/values are ' - 'unsupported, otherwise, backends should attempt to run the task', - example=False, - ) - - -class Artifact(Enum): - tes = 'tes' - - -class TesState(Enum): - UNKNOWN = 'UNKNOWN' - QUEUED = 'QUEUED' - INITIALIZING = 'INITIALIZING' - RUNNING = 'RUNNING' - PAUSED = 'PAUSED' - COMPLETE = 'COMPLETE' - EXECUTOR_ERROR = 'EXECUTOR_ERROR' - SYSTEM_ERROR = 'SYSTEM_ERROR' - CANCELED = 'CANCELED' - PREEMPTED = 'PREEMPTED' - CANCELING = 'CANCELING' - - -class TesTaskLog(BaseModel): - logs: List[TesExecutorLog] = Field(..., description='Logs for each executor') - metadata: Optional[Dict[str, str]] = Field( - None, - description='Arbitrary logging metadata included by the implementation.', - example={'host': 'worker-001', 
'slurmm_id': 123456}, - ) - start_time: Optional[str] = Field( - None, - description='When the task started, in RFC 3339 format.', - example='2020-10-02T10:00:00-05:00', - ) - end_time: Optional[str] = Field( - None, - description='When the task ended, in RFC 3339 format.', - example='2020-10-02T11:00:00-05:00', - ) - outputs: List[TesOutputFileLog] = Field( - ..., - description='Information about all output files. Directory outputs are\nflattened into separate items.', - ) - system_logs: Optional[List[str]] = Field( - None, - description='System logs are any logs the system decides are relevant,\nwhich are not tied directly ' - 'to an Executor process.\nContent is implementation specific: format, size, etc.\n\nSystem ' - 'logs may be collected here to provide convenient access.\n\nFor example, the system may ' - 'include the name of the host\nwhere the task is executing, an error message ' - 'that caused\na SYSTEM_ERROR state (e.g. disk is full), etc.\n\nSystem logs are ' - 'only included in the FULL task view.', - ) - - -class ServiceType(BaseModel): - group: str = Field( - ..., - description="Namespace in reverse domain name format. Use `org.ga4gh` for implementations compliant " - "with official GA4GH specifications. For services with custom APIs not standardized by " - "GA4GH, or implementations diverging from official GA4GH specifications, use a different " - "namespace (e.g. your organization's reverse domain name).", - example='org.ga4gh', - ) - artifact: str = Field( - ..., - description='Name of the API or GA4GH specification implemented. Official GA4GH types should be assigned ' - 'as part of standards approval process. Custom artifacts are supported.', - example='beacon', - ) - version: str = Field( - ..., - description='Version of the API or specification. 
GA4GH specifications use semantic versioning.', - example='1.0.0', - ) - - -class Organization(BaseModel): - name: str = Field( - ..., - description='Name of the organization responsible for the service', - example='My organization', - ) - url: AnyUrl = Field( - ..., - description='URL of the website of the organization (RFC 3986 format)', - example='https://example.com', - ) - - -class Service(BaseModel): - id: str = Field( - ..., - description='Unique ID of this service. Reverse domain name notation is recommended, though not required. ' - 'The identifier should attempt to be globally unique so it can be used in downstream ' - 'aggregator services e.g. Service Registry.', - example='org.ga4gh.myservice', - ) - name: str = Field( - ..., - description='Name of this service. Should be human readable.', - example='My project', - ) - type: ServiceType - description: Optional[str] = Field( - None, - description='Description of the service. Should be human readable and provide information about the service.', - example='This service provides...', - ) - organization: Organization = Field( - ..., description='Organization providing the service' - ) - contactUrl: Optional[AnyUrl] = Field( - None, - description='URL of the contact for the provider of this service, e.g. a link to a contact form ' - '(RFC 3986 format), or an email (RFC 2368 format).', - example='mailto:support@example.com', - ) - documentationUrl: Optional[AnyUrl] = Field( - None, - description='URL of the documentation of this service (RFC 3986 format). This should help someone ' - 'learn how to use your service, including any specifics required to access data, ' - 'e.g. 
authentication.', - example='https://docs.myservice.example.com', - ) - createdAt: Optional[datetime] = Field( - None, - description='Timestamp describing when the service was first deployed and available (RFC 3339 format)', - example='2019-06-04T12:58:19Z', - ) - updatedAt: Optional[datetime] = Field( - None, - description='Timestamp describing when the service was last updated (RFC 3339 format)', - example='2019-06-04T12:58:19Z', - ) - environment: Optional[str] = Field( - None, - description='Environment the service is running in. Use this to distinguish between production, ' - 'development and testing/staging deployments. Suggested values are prod, test, dev, ' - 'staging. However this is advised and not enforced.', - example='test', - ) - version: str = Field( - ..., - description='Version of the service being described. Semantic versioning is recommended, but ' - 'other identifiers, such as dates or commit hashes, are also allowed. The version should ' - 'be changed whenever the service is updated.', - example='1.0.0', - ) - - -class TesServiceType(ServiceType): - artifact: Artifact = Field(..., example='tes') - - -class TesServiceInfo(Service): - storage: Optional[List[str]] = Field( - None, - description='Lists some, but not necessarily all, storage locations supported\nby the service.', - example=[ - 'file:///path/to/local/funnel-storage', - 's3://ohsu-compbio-funnel/storage', - ], - ) - tesResources_backend_parameters: Optional[List[str]] = Field( - None, - description='Lists all tesResources.backend_parameters keys supported\nby the service', - example=['VmSize'], - ) - type: TesServiceType = Field(...) 
- - -class TesTask(BaseModel): - id: Optional[str] = Field( - None, - description='Task identifier assigned by the server.', - example='job-0012345', - ) - state: Optional[TesState] = None - name: Optional[str] = Field(None, description='User-provided task name.') - description: Optional[str] = Field( - None, - description='Optional user-provided description of task for documentation purposes.', - ) - inputs: Optional[List[TesInput]] = Field( - None, - description='Input files that will be used by the task. Inputs will be downloaded\nand mounted into ' - 'the executor container as defined by the task request\ndocument.', - example=[{'url': 's3://my-object-store/file1', 'path': '/data/file1'}], - ) - outputs: Optional[List[TesOutput]] = Field( - None, - description='Output files.\nOutputs will be uploaded from the executor container to long-term storage.', - example=[ - { - 'path': '/data/outfile', - 'url': 's3://my-object-store/outfile-1', - 'type': 'FILE', - } - ], - ) - resources: Optional[TesResources] = None - executors: List[TesExecutor] = Field( - ..., - description='An array of executors to be run. Each of the executors will run one\nat a time sequentially. ' - 'Each executor is a different command that\nwill be run, and each can utilize a different ' - 'docker image. But each of\nthe executors will see the same mapped inputs and volumes ' - 'that are declared\nin the parent CreateTask message.\n\nExecution stops on the first error.', - ) - volumes: Optional[List[str]] = Field( - None, - description='Volumes are directories which may be used to share data between\nExecutors. 
Volumes are ' - 'initialized as empty directories by the\nsystem when the task starts and are mounted at the ' - 'same path\nin each Executor.\n\nFor example, given a volume defined at `/vol/A`,\nexecutor 1 ' - 'may write a file to `/vol/A/exec1.out.txt`, then\nexecutor 2 may read from that ' - 'file.\n\n(Essentially, this translates to a `docker run -v` flag where\nthe container path ' - 'is the same for each executor).', - example=['/vol/A/'], - ) - tags: Optional[Dict[str, str]] = Field( - None, - description='A key-value map of arbitrary tags. These can be used to store meta-data\nand annotations ' - 'about a task. Example:\n```\n{\n "tags" : {\n "WORKFLOW_ID" : "cwl-01234",\n ' - '"PROJECT_GROUP" : "alice-lab"\n }\n}\n```', - example={'WORKFLOW_ID': 'cwl-01234', 'PROJECT_GROUP': 'alice-lab'}, - ) - logs: Optional[List[TesTaskLog]] = Field( - None, - description='Task logging information.\nNormally, this will contain only one entry, but in the case ' - 'where\na task fails and is retried, an entry will be appended to this list.', - ) - creation_time: Optional[str] = Field( - None, - description='Date + time the task was created, in RFC 3339 format.\nThis is set by the system, not the client.', - example='2020-10-02T10:00:00-05:00', - ) - - -class TesListTasksResponse(BaseModel): - tasks: List[TesTask] = Field( - ..., - description='List of tasks. These tasks will be based on the original submitted\ntask document, but with ' - 'other fields, such as the job state and\nlogging info, added/changed as the job progresses.', - ) - next_page_token: Optional[str] = Field( - None, - description='Token used to return the next page of results. 
This value can be used\nin the `page_token` ' - 'field of the next ListTasks request.', - ) - - -# Extra models manually added for Minimal View -class TesTaskMinimal(BaseModel): - id: str = Field( - ..., - description='Task identifier assigned by the server.', - example='job-0012345', - ) - state: TesState = Field(..., example='UNKNOWN') - - -class TesListTasksResponseMinimal(BaseModel): - tasks: List[TesTaskMinimal] = Field( - ..., - description='List of tasks. These tasks will be based on the original submitted\ntask document, but with ' - 'other fields, such as the job state and\nlogging info, added/changed as the job progresses.', - ) diff --git a/compliance_suite/report_server.py b/compliance_suite/report_server.py index 397b69e..ba1fec0 100644 --- a/compliance_suite/report_server.py +++ b/compliance_suite/report_server.py @@ -5,8 +5,10 @@ """ import http.server +import importlib.resources import json import os +from pathlib import Path import socketserver import threading import time @@ -38,9 +40,23 @@ def render_html(self) -> None: with open(os.path.join(self.web_dir, "web_report.json"), "r") as f: report_data = json.load(f) + with importlib.resources.path("compliance_suite", "web") as dir_path: + web_dir_path = dir_path.resolve() + + header_data = { + "path_jquery": str(web_dir_path) + "/public/jquery.jsonPresenter.css", + "path_index_css": str(web_dir_path) + "/public/index.css", + "path_report_js": str(web_dir_path) + "/views/report.js" + } + # Render the HTML via Jinja templates - view_loader = j2.FileSystemLoader(searchpath=self.web_dir) + view_loader = j2.FileSystemLoader(searchpath=web_dir_path) view_env = j2.Environment(loader=view_loader) + header_template = view_env.get_template("partials/header.html") + header_rendered = header_template.render(data=header_data) + with open(str(web_dir_path) + "/partials/header.html", "w+") as output: + output.write(header_rendered) + report_template = view_env.get_template("views/report.html") report_rendered = 
report_template.render(data=report_data) diff --git a/compliance_suite/suite_validator.py b/compliance_suite/suite_validator.py new file mode 100644 index 0000000..b96e2e5 --- /dev/null +++ b/compliance_suite/suite_validator.py @@ -0,0 +1,70 @@ +"""Module compliance_suite.suite_validator.py + +This module contains class definition for Suite Validator to validate the tests, templates and models present in the +test suite repository +""" + +import ast +from pathlib import Path + +from compliance_suite.constants.constants import ( + TEMPLATE, + TEST +) +from compliance_suite.exceptions.compliance_exception import JobValidationException +from compliance_suite.functions.log import logger +from compliance_suite.utils.test_utils import load_and_validate_yaml_data + + +class SuiteValidator: + """Class to validate the test suite""" + + @staticmethod + def check_directory(directory_path: Path, directory_type: str): + """Check if the directory exists and contains required files. Validate the file format and schema""" + + if not (directory_path.exists() and directory_path.is_dir()): + raise JobValidationException(name="Validation failed", + message=f"Required directory {directory_type} not present in the test suite", + details="NULL") + + files = list(directory_path.glob("*specs.py" if directory_type == "models" else "*.yml")) + + if not files: + raise JobValidationException( + name="Validation failed", + message=f"No files present within {directory_type} directory inside test suite", + details="NULL") + + for file in files: + if directory_type == "tests": + load_and_validate_yaml_data(str(file), TEST) + elif directory_type == "templates": + load_and_validate_yaml_data(str(file), TEMPLATE) + else: + try: + with file.open('r') as f: + ast.parse(f.read()) + logger.info(f"Python file valid for Model: {f.name}") + except SyntaxError as err: + logger.error(f"Syntax error in model file {f.name}") + raise JobValidationException( + name="Validation failed", + message=f"Invalid model 
file {f.name}", + details=err.__str__()) + + @staticmethod + def validate(): + """Performs multiple validation checks on the test suite""" + + project_root_dir = Path.cwd().resolve() + required_directories = ["models", "tests", "templates"] + + try: + for directory_type in required_directories: + directory_path = project_root_dir / directory_type + SuiteValidator.check_directory(directory_path, directory_type) + + except JobValidationException as err: + logger.error("Test suite validation failed.") + logger.error(err) diff --git a/docs/test_config/common_schema.json b/compliance_suite/test_config/common_schema.json similarity index 92% rename from docs/test_config/common_schema.json rename to compliance_suite/test_config/common_schema.json index 4c28f3f..291b29b 100644 --- a/docs/test_config/common_schema.json +++ b/compliance_suite/test_config/common_schema.json @@ -9,14 +9,7 @@ "properties": { "name": { "type": "string", - "description": "The sub-job name. It should be within the specified enum for consistency", - "enum": [ - "service_info", - "list_tasks", - "create_task", - "get_task", - "cancel_task" - ] + "description": "The sub-job name. It should be within the specified enum for consistency" }, "description": { "type": "string", @@ -24,13 +17,7 @@ }, "endpoint": { "type": "string", - "description": "The request endpoint. It should be within the specified enum for consistency", - "enum": [ - "/service-info", - "/tasks", - "/tasks/{id}", - "/tasks/{id}:cancel" - ] + "description": "The request endpoint. 
It should be within the specified enum for consistency" }, "operation": { "type": "string", diff --git a/docs/test_config/template_schema.json b/compliance_suite/test_config/template_schema.json similarity index 100% rename from docs/test_config/template_schema.json rename to compliance_suite/test_config/template_schema.json diff --git a/docs/test_config/test_schema.json b/compliance_suite/test_config/test_schema.json similarity index 82% rename from docs/test_config/test_schema.json rename to compliance_suite/test_config/test_schema.json index 2e6c496..29bd86a 100644 --- a/docs/test_config/test_schema.json +++ b/compliance_suite/test_config/test_schema.json @@ -10,10 +10,7 @@ }, "service": { "type": "string", - "description": "The GA4GH service name", - "enum": [ - "TES" - ] + "description": "The GA4GH service name" }, "versions": { "type": "array", @@ -24,7 +21,7 @@ }, "tags": { "type": "array", - "description": "The list of tags which define the test file. Example. Logical - No Polling needed, Functional - Polling needed. Always add 3 tags - Individual tag based on test name, TES endpoint tag and All tag", + "description": "The list of tags which define the test file.", "items": { "type": "string" }, @@ -45,7 +42,7 @@ "properties": { "$ref":{ "type": "string", - "description": "The relative path to a test template." + "description": "The relative path from root to a test template." 
}, "args": { "type": "object", diff --git a/docs/test_config/test_syntax.yml b/compliance_suite/test_config/test_syntax.yml similarity index 100% rename from docs/test_config/test_syntax.yml rename to compliance_suite/test_config/test_syntax.yml diff --git a/compliance_suite/test_runner.py b/compliance_suite/test_runner.py index 3a85969..d1f5ba7 100644 --- a/compliance_suite/test_runner.py +++ b/compliance_suite/test_runner.py @@ -3,20 +3,21 @@ This module contains class definition for Test Runner to run the individual jobs, validate them and store their result """ -import importlib +import importlib.util import json +from pathlib import Path import re from typing import ( Any, Dict ) +import yaml from dotmap import DotMap from ga4gh.testbed.report.test import Test from pydantic import ValidationError from requests.models import Response -from compliance_suite.constants.constants import ENDPOINT_TO_MODEL from compliance_suite.exceptions.compliance_exception import ( JobValidationException, TestFailureException @@ -30,21 +31,20 @@ class TestRunner(): """Class to run individual jobs by sending requests to the server endpoints. It stores the data to be used by other jobs. It validates the request, response and their schemas""" - def __init__(self, service: str, server: str, version: str): + def __init__(self, server: str, version: str): """Initialize the Test Runner object Args: - service (str): The GA4GH service name (eg. 
TES) server (str): The server URL to send the request version (str): The version of the deployed server """ - self.service: str = service self.server: str = server self.version: str = version self.job_data: Any = None self.auxiliary_space: Dict = {} # Dictionary to store the sub-job results self.report_test: Any = None # Test object to store the result + self.api_config = self.get_api_config() def set_job_data(self, job_data: Any) -> None: """Set the individual sub job data @@ -74,13 +74,24 @@ def set_report_test(self, report_test: Test) -> None: self.report_test = report_test + def get_api_config(self) -> Any: + """Retrieve the API config from Tests Repo and set the api_config""" + + api_config_path = Path("api_config.yml") + try: + return yaml.safe_load(open(api_config_path, "r")) + except yaml.YAMLError as err: + raise TestFailureException(name="YAML Error", + message=f"Invalid YAML file {api_config_path} inside tests repo", + details=err) + def validate_logic( self, endpoint_model: str, json_data: Any, message: str ) -> None: - """ Validates if the response is in accordance with the TES API Specs and Models. Validation is done via + """ Validates if the response is in accordance with the API Specs and Models. Validation is done via Pydantic generated models Args: @@ -95,9 +106,14 @@ def validate_logic( description="Check if response matches the model schema") try: - pydantic_module: Any = importlib.import_module( - "compliance_suite.models.v" + self.version.replace('.', '_') + "_specs") - pydantic_model_class: Any = getattr(pydantic_module, ENDPOINT_TO_MODEL[endpoint_model]) + model_file_name = "v" + self.version.replace('.', '_') + "_specs.py" + model_path = Path("models/" + model_file_name) + spec = importlib.util.spec_from_file_location("models." 
+ model_file_name, str(model_path)) + pydantic_module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(pydantic_module) + + pydantic_model_name: str = self.api_config["ENDPOINT_TO_MODEL"][endpoint_model] + pydantic_model_class: Any = getattr(pydantic_module, pydantic_model_name) pydantic_model_class(**json_data) # JSON validation against Pydantic Model logger.info(f'{message} Schema validation successful for ' f'{self.job_data["operation"]} {self.job_data["endpoint"]}') @@ -197,7 +213,7 @@ def validate_response( response_json: Any = {} # Handle the Cancel Task Endpoint empty response else: response_json: Any = response.json() - + # TODO if self.job_data["name"] in ["list_tasks", "get_task"]: view_query: str = "" for query_param in self.job_data["query_parameters"]: @@ -353,11 +369,12 @@ def run_tests( query_params.update(param) self.transform_parameters(query_params) - if self.job_data["name"] in ["create_task"]: + if "request_body" in self.job_data: request_body: str = self.job_data["request_body"] self.validate_request_body(request_body) client = Client() + client.set_request_headers(self.api_config["REQUEST_HEADERS"]) if "polling" in self.job_data.keys(): @@ -365,14 +382,14 @@ def run_tests( if "env_vars" in self.job_data.keys() and "check_cancel" in self.job_data["env_vars"].keys(): check_cancel = self.job_data["env_vars"]["check_cancel"] - response = client.poll_request(service=self.service, server=self.server, version=self.version, + response = client.poll_request(server=self.server, version=self.version, endpoint=self.job_data["endpoint"], path_params=path_params, query_params=query_params, operation=self.job_data["operation"], polling_interval=self.job_data["polling"]["interval"], polling_timeout=self.job_data["polling"]["timeout"], check_cancel_val=check_cancel) else: - response = client.send_request(service=self.service, server=self.server, version=self.version, + response = client.send_request(server=self.server, version=self.version, 
endpoint=self.job_data["endpoint"], path_params=path_params, query_params=query_params, operation=self.job_data["operation"], request_body=request_body) diff --git a/compliance_suite/utils/test_utils.py b/compliance_suite/utils/test_utils.py index 85b4b16..d0ec723 100644 --- a/compliance_suite/utils/test_utils.py +++ b/compliance_suite/utils/test_utils.py @@ -3,12 +3,25 @@ This module contains the utility functions to perform actions on test files """ +import importlib.resources +from pathlib import Path from typing import ( Any, List, Union ) +from jsonschema import ( + RefResolver, + validate, + ValidationError +) +import yaml + +from compliance_suite.constants.constants import TEST +from compliance_suite.exceptions.compliance_exception import JobValidationException +from compliance_suite.functions.log import logger + def tag_matcher( include_tags: List[str], @@ -57,3 +70,46 @@ def replace_string(data: Any, search_str: str, replace_str: Union[str, int]) -> return data elif isinstance(data, str) or isinstance(data, int): return replace_str if data == search_str else data + + +def load_and_validate_yaml_data(yaml_file: str, _type: str) -> Any: + """ + Load and validate YAML data from the file with the provided schema type. + + Args: + yaml_file: The path to the YAML file. + _type: The type of YAML file, either "Test" or "Template". + + Returns: + The loaded and validated YAML data. 
+ """ + + # Load YAML data + try: + # yaml_data = yaml.safe_load(open(yaml_file if _type == TEST else "tmp/testdir/"+yaml_file, "r")) + yaml_data = yaml.safe_load(open(yaml_file, "r")) + except yaml.YAMLError as err: + raise JobValidationException(name="YAML Error", + message=f"Invalid YAML file {yaml_file}", + details=err) + + # Validate YAML data with schema + with importlib.resources.path("compliance_suite", "test_config") as dir_path: + schema_dir_path = dir_path.resolve() + test_schema_path = Path(schema_dir_path/"test_schema.json") + template_schema_path = Path(schema_dir_path/"template_schema.json") + schema_file_path = str(test_schema_path if _type == TEST else template_schema_path) + json_schema = yaml.safe_load(open(schema_file_path, "r")) + + try: + # Python-jsonschema does not reference local files directly + # Refer solution from https://github.com/python-jsonschema/jsonschema/issues/98#issuecomment-105475109 + resolver = RefResolver('file:///' + str(schema_dir_path).replace("\\", "/") + '/', None) + validate(yaml_data, json_schema, resolver=resolver) + logger.info(f'YAML file valid for {_type}: {yaml_file}') + except ValidationError as err: + raise JobValidationException(name="YAML Schema Validation Error", + message=f"YAML file {yaml_file} does not match the {_type} schema", + details=err.message) + + return yaml_data diff --git a/compliance_suite/web/partials/header.html b/compliance_suite/web/partials/header.html index 6cde412..cc9cb32 100644 --- a/compliance_suite/web/partials/header.html +++ b/compliance_suite/web/partials/header.html @@ -7,12 +7,12 @@ - - + + - + \ No newline at end of file diff --git a/docs/test_structure.md b/docs/test_structure.md index 932b70e..9880c18 100644 --- a/docs/test_structure.md +++ b/docs/test_structure.md @@ -82,4 +82,4 @@ A JSON report is published at the end according to the `ga4gh-testbed-lib` stand ![Json_Report](/docs/images/json_report.JPG) -[res-test-schema]: test_config/test_schema.json \ No newline at end 
of file +[res-test-schema]: ../compliance_suite/test_config/test_schema.json \ No newline at end of file diff --git a/docs/utility.md b/docs/utility.md index 713c406..12f2c16 100644 --- a/docs/utility.md +++ b/docs/utility.md @@ -178,6 +178,6 @@ tespassword=$(jq -r '.TesPassword' TesCredentials.json) tes-compliance-suite report --server http://$tesuser:$tespassword@$teshostname/ --include-tags all --output_path results ``` -[res-test-syntax]: test_config/test_syntax.yml +[res-test-syntax]: ../compliance_suite/test_config/test_syntax.yml [dockerfile]: ../docker/Dockerfile [entrypoint]: ../docker/entrypoint.sh \ No newline at end of file diff --git a/setup.py b/setup.py index 3975f5b..f52f165 100644 --- a/setup.py +++ b/setup.py @@ -10,9 +10,9 @@ install_requires: list = _file.read().splitlines() setup( - name='tes-compliance-suite', + name='openapi-compliance-suite', version='0.1.0', - description='TES Compliance Suite to perform conformance testing to API specs and functionality', + description='OpenAPI Compliance Suite to perform conformance testing to API specs and functionality', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/elixir-cloud-aai/tes-compliance-suite', @@ -38,11 +38,11 @@ ], entry_points={ 'console_scripts': [ - 'tes-compliance-suite = compliance_suite.cli:main', + 'compliance-suite = compliance_suite.cli:main', ], }, keywords=( - 'ga4gh tes elixir rest api app server openapi ' + 'elixir rest api app server openapi ' 'python compliance testing pydantic yaml ' ), project_urls={ @@ -51,8 +51,10 @@ 'Tracker': 'https://github.com/elixir-cloud-aai/tes-compliance-suite/issues', }, license='Apache License 2.0', - packages=find_packages(), - package_data={'': ['../tests/*', 'web/*/*']}, + packages=find_packages(exclude=[ + 'unittests', + 'unittests.*' + ]), + package_data={'': ['test_config/*', 'web/*/*']}, install_requires=install_requires, - include_package_data=True, ) diff --git 
a/templates/cancel_task_template.yml b/templates/cancel_task_template.yml deleted file mode 100644 index c8b7108..0000000 --- a/templates/cancel_task_template.yml +++ /dev/null @@ -1,8 +0,0 @@ -- name: cancel_task - description: Cancel a TES task - endpoint: /tasks/{id}:cancel - operation: POST - path_parameters: - id: "{id}" - response: - 200: diff --git a/templates/create_task_template.yml b/templates/create_task_template.yml deleted file mode 100644 index f4669d1..0000000 --- a/templates/create_task_template.yml +++ /dev/null @@ -1,22 +0,0 @@ -- name: create_task - description: Create a new TES task - endpoint: /tasks - operation: POST - request_body: | - { - "name": "CompTest", - "description": "CompTest", - "executors": [ - { - "image": "alpine", - "command": [ - "echo", - "hello" - ] - } - ] - } - storage_vars: - id: $response.id - response: - 200: {} \ No newline at end of file diff --git a/templates/get_task_polling_template.yml b/templates/get_task_polling_template.yml deleted file mode 100644 index 5ac7610..0000000 --- a/templates/get_task_polling_template.yml +++ /dev/null @@ -1,13 +0,0 @@ -- name: get_task - description: Retrieve the task details for TES task - endpoint: /tasks/{id} - operation: GET - path_parameters: - id: "{id}" - query_parameters: - - view: "{view_value}" - polling: - interval: "{polling_interval_value}" - timeout: "{polling_timeout_value}" - response: - 200: \ No newline at end of file diff --git a/templates/get_task_template.yml b/templates/get_task_template.yml deleted file mode 100644 index 10fbda1..0000000 --- a/templates/get_task_template.yml +++ /dev/null @@ -1,10 +0,0 @@ -- name: get_task - description: Retrieve the task details for TES task - endpoint: /tasks/{id} - operation: GET - path_parameters: - id: "{id}" - query_parameters: - - view: "{view_value}" - response: - 200: {} \ No newline at end of file diff --git a/templates/list_tasks_template.yml b/templates/list_tasks_template.yml deleted file mode 100644 index 
f13b94f..0000000 --- a/templates/list_tasks_template.yml +++ /dev/null @@ -1,8 +0,0 @@ -- name: list_tasks - description: Retrieve the list of tasks tracked by the TES server - endpoint: /tasks - operation: GET - query_parameters: - - view: "{view_value}" - response: - 200: {} \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/tests/cancel_task.yml b/tests/cancel_task.yml deleted file mode 100644 index 4fe6240..0000000 --- a/tests/cancel_task.yml +++ /dev/null @@ -1,10 +0,0 @@ -description: Job to cancel a TES Task -service: TES -versions: - - 1.0.0 - - 1.1.0 -tags: - - schema_validation_only -jobs: - - $ref: "./templates/create_task_template.yml" - - $ref: "./templates/cancel_task_template.yml" diff --git a/tests/cancel_task_functional.yml b/tests/cancel_task_functional.yml deleted file mode 100644 index 8bbf279..0000000 --- a/tests/cancel_task_functional.yml +++ /dev/null @@ -1,39 +0,0 @@ -description: Job to cancel a TES Task -service: TES -versions: - - 1.0.0 - - 1.1.0 -tags: [] -jobs: - - $ref: "./templates/create_task_template.yml" - - $ref: "./templates/cancel_task_template.yml" - - name: get_task - description: Retrieve the list of tasks tracked by the TES server - endpoint: /tasks/{id} - operation: GET - path_parameters: - id: "{id}" - query_parameters: - - view: MINIMAL - polling: - interval: 10 - timeout: 3600 - env_vars: - check_cancel: True - response: - 200: | - { - "tasks": [ - { - "executors": [ - { - "image": "ubuntu:20.04", - "command": [ - "/bin/md5", - "/data/file1" - ] - } - ] - } - ] - } diff --git a/tests/create_task.yml b/tests/create_task.yml deleted file mode 100644 index 97c546d..0000000 --- a/tests/create_task.yml +++ /dev/null @@ -1,9 +0,0 @@ -description: Job to create a new TES Task -service: TES -versions: - - 1.0.0 - - 1.1.0 -tags: - - schema_validation_only -jobs: - - $ref: "./templates/create_task_template.yml" \ No newline at end of file diff 
--git a/tests/create_task_backend_parameters.yml b/tests/create_task_backend_parameters.yml deleted file mode 100644 index d98687c..0000000 --- a/tests/create_task_backend_parameters.yml +++ /dev/null @@ -1,34 +0,0 @@ -description: Job to create a new TES Task with backend parameters -service: TES -versions: - - 1.1.0 -tags: - - schema_validation_only -jobs: - - name: create_task - description: Create a new TES task - endpoint: /tasks - operation: POST - request_body: | - { - "name": "CompTest", - "description": "CompTest", - "executors": [ - { - "image": "alpine", - "command": [ - "echo", - "hello" - ] - } - ], - "resources": { - "backend_parameters": { - "VmSize" : "Standard_D64_v3", - "Caching" : "ReadWrite" - }, - "backend_parameters_strict": false - } - } - response: - 200: {} diff --git a/tests/create_task_backend_parameters_negative.yml b/tests/create_task_backend_parameters_negative.yml deleted file mode 100644 index de9a0dd..0000000 --- a/tests/create_task_backend_parameters_negative.yml +++ /dev/null @@ -1,35 +0,0 @@ -description: Job to create a new TES Task with backend parameters (Negative case) -service: TES -versions: - - 1.1.0 -tags: - - schema_validation_only -jobs: - - name: create_task - description: Create a new TES task - endpoint: /tasks - operation: POST - request_body: | - { - "name": "CompTest", - "description": "CompTest", - "executors": [ - { - "image": "alpine", - "command": [ - "echo", - "hello" - ] - } - ], - "resources": { - "backend_parameters": { - "INVALID" : "PARAMETER" - }, - "backend_parameters_strict": true - } - } - response: - # Keeping response code as 400 to test intentionally wrong backend parameters - # https://github.com/elixir-cloud-aai/tes-compliance-suite/pull/29#discussion_r1108893420 - 400: {} diff --git a/tests/create_task_functional.yml b/tests/create_task_functional.yml deleted file mode 100644 index c7a0be8..0000000 --- a/tests/create_task_functional.yml +++ /dev/null @@ -1,13 +0,0 @@ -description: Job to create 
a new TES Task and test functionality -service: TES -versions: - - 1.0.0 - - 1.1.0 -tags: [] -jobs: - - $ref: "./templates/create_task_template.yml" - - $ref: "./templates/get_task_polling_template.yml" - args: - view_value: "MINIMAL" - polling_interval_value: 10 - polling_timeout_value: 3600 diff --git a/tests/create_task_ignore_error.yml b/tests/create_task_ignore_error.yml deleted file mode 100644 index 897bb1c..0000000 --- a/tests/create_task_ignore_error.yml +++ /dev/null @@ -1,93 +0,0 @@ -description: Job to create a new TES Task with ignore_error flag -service: TES -versions: - - 1.1.0 -tags: - - schema_validation_only -jobs: - - name: create_task - description: Ignore error flag as false, so executor should fail at first command - endpoint: /tasks - operation: POST - request_body: | - { - "name": "CompTest", - "description": "CompTest", - "executors": [ - { - "image": "alpine", - "command": [ - "ERROR" - ], - "ignore_error": false - }, - { - "image": "alpine", - "command": [ - "echo", - "hello" - ] - } - ] - } - storage_vars: - id1: $response.id - response: - 200: {} - - name: get_task - description: Retrieve the task details for TES task - endpoint: /tasks/{id} - operation: GET - path_parameters: - id: "{id1}" - query_parameters: - - view: FULL - filter: - - path: $response.logs[0].logs - type: array - size: 1 - response: - 200: {} - - name: create_task - description: Ignore error flag as true, so executor should fail at second command - endpoint: /tasks - operation: POST - request_body: | - { - "name": "CompTest", - "description": "CompTest", - "executors": [ - { - "image": "alpine", - "command": [ - "ERROR" - ], - "ignore_error": true - }, - { - "image": "alpine", - "command": [ - "echo", - "hello" - ] - } - ] - } - storage_vars: - id2: $response.id - response: - 200: {} - - name: get_task - description: Retrieve the task details for TES task - endpoint: /tasks/{id} - operation: GET - path_parameters: - id: "{id2}" - query_parameters: - - view: FULL - 
filter: - - path: $response.logs[0].logs - type: array - size: 2 - response: - 200: {} diff --git a/tests/create_task_inputs.yml b/tests/create_task_inputs.yml deleted file mode 100644 index 43dea05..0000000 --- a/tests/create_task_inputs.yml +++ /dev/null @@ -1,35 +0,0 @@ -description: Job to create a new TES Task with inputs -service: TES -versions: - - 1.0.0 - - 1.1.0 -tags: - - schema_validation_only -jobs: - - name: create_task - description: Create a new TES task - endpoint: /tasks - operation: POST - request_body: | - { - "name": "CompTest", - "description": "CompTest", - "executors": [ - { - "image": "alpine", - "command": [ - "echo", - "hello" - ] - } - ], - "inputs": [ - { - "url": "s3://my-object-store/file-1", - "path": "/data/file1", - "type": "FILE" - } - ] - } - response: - 200: {} diff --git a/tests/create_task_optional_filetype.yml b/tests/create_task_optional_filetype.yml deleted file mode 100644 index cc5c0b1..0000000 --- a/tests/create_task_optional_filetype.yml +++ /dev/null @@ -1,39 +0,0 @@ -description: Job to create a new TES Task intentionally without input and output filetypes -service: TES -versions: - - 1.1.0 -tags: - - schema_validation_only -jobs: - - name: create_task - description: Create a new TES task - endpoint: /tasks - operation: POST - request_body: | - { - "name": "CompTest", - "description": "CompTest", - "executors": [ - { - "image": "alpine", - "command": [ - "echo", - "hello" - ] - } - ], - "inputs": [ - { - "url": "s3://my-object-store/file-1", - "path": "/data/file1" - } - ], - "outputs": [ - { - "url": "s3://my-object-store/outfile-1", - "path": "/data/outfile" - } - ] - } - response: - 200: {} diff --git a/tests/create_task_outputs.yml b/tests/create_task_outputs.yml deleted file mode 100644 index 163c9d2..0000000 --- a/tests/create_task_outputs.yml +++ /dev/null @@ -1,35 +0,0 @@ -description: Job to create a new TES Task with outputs -service: TES -versions: - - 1.0.0 - - 1.1.0 -tags: - - schema_validation_only -jobs: 
- - name: create_task - description: Create a new TES task - endpoint: /tasks - operation: POST - request_body: | - { - "name": "CompTest", - "description": "CompTest", - "executors": [ - { - "image": "alpine", - "command": [ - "echo", - "hello" - ] - } - ], - "outputs": [ - { - "url": "s3://my-object-store/outfile-1", - "path": "/data/outfile", - "type": "FILE" - } - ] - } - response: - 200: {} diff --git a/tests/create_task_streamable.yml b/tests/create_task_streamable.yml deleted file mode 100644 index 040288d..0000000 --- a/tests/create_task_streamable.yml +++ /dev/null @@ -1,36 +0,0 @@ -description: Job to create a new TES Task with streamable flag set in input -service: TES -versions: - - 1.1.0 -tags: - - schema_validation_only -jobs: - - name: create_task - description: Create a new TES task - endpoint: /tasks - operation: POST - request_body: | - { - "name": "CompTest", - "description": "CompTest", - "executors": [ - { - "image": "alpine", - "command": [ - "echo", - "hello" - ] - } - ], - "input": [ - { - "name": "access-file", - "description": "Access file resource via streaming", - "url": "s3://my-object-store/file1", - "path": "/data/file1", - "streamable": true - } - ] - } - response: - 200: {} diff --git a/tests/filter_task_by_name.yml b/tests/filter_task_by_name.yml deleted file mode 100644 index 5e95956..0000000 --- a/tests/filter_task_by_name.yml +++ /dev/null @@ -1,108 +0,0 @@ -description: Job to test the filter task by name feature -service: TES -versions: - - 1.0.0 - - 1.1.0 -tags: [] -jobs: - - name: create_task - description: Create a new TES task - endpoint: /tasks - operation: POST - request_body: | - { - "name": "e70dda48-ComplianceTest", - "description": "Compliance Test", - "executors": [ - { - "image": "alpine", - "command": [ - "echo", - "hello" - ] - } - ] - } - response: - 200: - - name: list_tasks - description: Retrieve the list of tasks tracked by the TES server - endpoint: /tasks - operation: GET - query_parameters: - - view: 
BASIC - - name_prefix: e70dda48-ComplianceTest - filter: - - path: $response.tasks[0].name - type: string - value: e70dda48-ComplianceTest - size: 23 - response: - 200: - - name: create_task - description: Create a new TES task - endpoint: /tasks - operation: POST - request_body: | - { - "name": "b51f4c41-ComplianceTest", - "description": "Compliance Test", - "executors": [ - { - "image": "alpine", - "command": [ - "echo", - "hello" - ] - } - ] - } - response: - 200: - - name: list_tasks - description: Retrieve the list of tasks tracked by the TES server - endpoint: /tasks - operation: GET - query_parameters: - - view: BASIC - - name_prefix: b51f4c41 - filter: - - path: $response.tasks[0].name - type: string - regex: True - value: ^b51f4c41 - response: - 200: - - name: create_task - description: Create a new TES task - endpoint: /tasks - operation: POST - request_body: | - { - "name": "ComplianceTest-f9327c19", - "description": "Compliance Test", - "executors": [ - { - "image": "alpine", - "command": [ - "echo", - "hello" - ] - } - ] - } - response: - 200: - - name: list_tasks - description: Retrieve the list of tasks tracked by the TES server - endpoint: /tasks - operation: GET - query_parameters: - - view: BASIC - - name_prefix: f9327c19 - filter: - - path: $response.tasks - type: array - size: 0 - response: - 200: \ No newline at end of file diff --git a/tests/filter_task_by_state.yml b/tests/filter_task_by_state.yml deleted file mode 100644 index b3e1aef..0000000 --- a/tests/filter_task_by_state.yml +++ /dev/null @@ -1,25 +0,0 @@ -description: Job to test the filter task by state feature -service: TES -versions: - - 1.1.0 -tags: [] -jobs: - - $ref: "./templates/create_task_template.yml" - - $ref: "./templates/get_task_polling_template.yml" - args: - view_value: "MINIMAL" - polling_interval_value: 10 - polling_timeout_value: 3600 - - name: list_tasks - description: Retrieve the list of tasks tracked by the TES server - endpoint: /tasks - operation: GET - 
query_parameters: - - view: MINIMAL - - state: COMPLETE - filter: - - path: $response.tasks[0].state - type: string - value: COMPLETE - response: - 200: \ No newline at end of file diff --git a/tests/filter_task_by_tag.yml b/tests/filter_task_by_tag.yml deleted file mode 100644 index e8fcb1f..0000000 --- a/tests/filter_task_by_tag.yml +++ /dev/null @@ -1,176 +0,0 @@ -description: Job to test the filter task by tag feature -service: TES -versions: - - 1.1.0 -tags: [] -jobs: - - name: create_task - description: Create a new TES task - endpoint: /tasks - operation: POST - request_body: | - { - "name": "ComplianceTest_FilterTaskByTag1", - "description": "Compliance Test", - "executors": [ - { - "image": "alpine", - "command": [ - "echo", - "hello" - ] - } - ], - "tags": { - "foo": "bar", - "baz": "bat", - "lorem": "ipsum", - "task": "create" - } - } - response: - 200: - - name: list_tasks - description: All tag key-value pairs with jumbled query parameter order - endpoint: /tasks - operation: GET - query_parameters: - - view: BASIC - - tag_key: [foo, lorem, task, baz] - - tag_value: [bar, ipsum, create, bat] - filter: - - path: $response.tasks[0].tags - type: object - value: | - { - "foo": "bar", - "baz": "bat", - "lorem": "ipsum", - "task": "create" - } - size: 4 - response: - 200: - - name: list_tasks - description: Two tags pairs, one tag key without tag value and one less tag pair - endpoint: /tasks - operation: GET - query_parameters: - - view: BASIC - - tag_key: [foo, lorem, baz] - - tag_value: [bar, ipsum] - filter: - - path: $response.tasks[0].tags - type: object - value: | - { - "foo": "bar", - "baz": "bat", - "lorem": "ipsum", - "task": "create" - } - size: 4 - response: - 200: - - name: list_tasks - description: More tags than defined in task - endpoint: /tasks - operation: GET - query_parameters: - - view: BASIC - - tag_key: [foo, lorem, extra] - - tag_value: [bar, ipsum] - filter: - - path: $response.tasks - type: array - size: 0 - response: - 200: - - 
name: list_tasks - description: Incorrect tag value - endpoint: /tasks - operation: GET - query_parameters: - - view: BASIC - - tag_key: foo - - tag_value: wrong_value - filter: - - path: $response.tasks - type: array - size: 0 - response: - 200: - - name: create_task - description: Create a new TES task - endpoint: /tasks - operation: POST - request_body: | - { - "name": "ComplianceTest_FilterTaskByTag2", - "description": "Compliance Test", - "executors": [ - { - "image": "alpine", - "command": [ - "echo", - "hello" - ] - } - ], - "tags": { - "abcxyz": "" - } - } - response: - 200: - - name: list_tasks - description: Empty tag value - endpoint: /tasks - operation: GET - query_parameters: - - view: BASIC - - tag_key: random - filter: - - path: $response.tasks[0].tags - type: object - value: | - { - "abcxyz": "" - } - size: 1 - response: - 200: - - name: create_task - description: Create a new TES task - endpoint: /tasks - operation: POST - request_body: | - { - "name": "ComplianceTest_FilterTaskByTag4", - "description": "Compliance Test", - "executors": [ - { - "image": "alpine", - "command": [ - "echo", - "hello" - ] - } - ] - } - response: - 200: - - name: list_tasks - description: No tags defined - endpoint: /tasks - operation: GET - query_parameters: - - view: BASIC - - tag_key: tag_key_not_defined - - tag_value: tag_value_not_defined - filter: - - path: $response.tasks - type: array - size: 0 - response: - 200: \ No newline at end of file diff --git a/tests/get_task_basic.yml b/tests/get_task_basic.yml deleted file mode 100644 index 335e063..0000000 --- a/tests/get_task_basic.yml +++ /dev/null @@ -1,12 +0,0 @@ -description: Job to retrieve the Basic view of TES Task -service: TES -versions: - - 1.0.0 - - 1.1.0 -tags: - - schema_validation_only -jobs: - - $ref: "./templates/create_task_template.yml" - - $ref: "./templates/get_task_template.yml" - args: - view_value: "BASIC" \ No newline at end of file diff --git a/tests/get_task_full.yml 
b/tests/get_task_full.yml deleted file mode 100644 index a47455e..0000000 --- a/tests/get_task_full.yml +++ /dev/null @@ -1,12 +0,0 @@ -description: Job to retrieve the Full view of TES Task -service: TES -versions: - - 1.0.0 - - 1.1.0 -tags: - - schema_validation_only -jobs: - - $ref: "./templates/create_task_template.yml" - - $ref: "./templates/get_task_template.yml" - args: - view_value: "FULL" \ No newline at end of file diff --git a/tests/get_task_minimal.yml b/tests/get_task_minimal.yml deleted file mode 100644 index bfa9ec4..0000000 --- a/tests/get_task_minimal.yml +++ /dev/null @@ -1,12 +0,0 @@ -description: Job to retrieve the Minimal view of TES Task -service: TES -versions: - - 1.0.0 - - 1.1.0 -tags: - - schema_validation_only -jobs: - - $ref: "./templates/create_task_template.yml" - - $ref: "./templates/get_task_template.yml" - args: - view_value: "MINIMAL" \ No newline at end of file diff --git a/tests/list_tasks_basic.yml b/tests/list_tasks_basic.yml deleted file mode 100644 index aea4956..0000000 --- a/tests/list_tasks_basic.yml +++ /dev/null @@ -1,12 +0,0 @@ -description: Job to retrieve the list of Basic view of TES tasks -service: TES -versions: - - 1.0.0 - - 1.1.0 -tags: - - schema_validation_only -jobs: - - $ref: "./templates/create_task_template.yml" - - $ref: "./templates/list_tasks_template.yml" - args: - view_value: "BASIC" diff --git a/tests/list_tasks_full.yml b/tests/list_tasks_full.yml deleted file mode 100644 index 7f70f60..0000000 --- a/tests/list_tasks_full.yml +++ /dev/null @@ -1,12 +0,0 @@ -description: Job to retrieve the list of Full view of TES tasks -service: TES -versions: - - 1.0.0 - - 1.1.0 -tags: - - schema_validation_only -jobs: - - $ref: "./templates/create_task_template.yml" - - $ref: "./templates/list_tasks_template.yml" - args: - view_value: "FULL" diff --git a/tests/list_tasks_minimal.yml b/tests/list_tasks_minimal.yml deleted file mode 100644 index 47b14f9..0000000 --- a/tests/list_tasks_minimal.yml +++ /dev/null @@ 
-1,12 +0,0 @@ -description: Job to retrieve the list of Minimal view of TES tasks -service: TES -versions: - - 1.0.0 - - 1.1.0 -tags: - - schema_validation_only -jobs: - - $ref: "./templates/create_task_template.yml" - - $ref: "./templates/list_tasks_template.yml" - args: - view_value: "MINIMAL" diff --git a/tests/list_tasks_page_size.yml b/tests/list_tasks_page_size.yml deleted file mode 100644 index 45043b3..0000000 --- a/tests/list_tasks_page_size.yml +++ /dev/null @@ -1,35 +0,0 @@ -description: Job to validate page size in list tasks -service: TES -versions: - - 1.0.0 - - 1.1.0 -tags: [] -jobs: - - $ref: "./templates/create_task_template.yml" - - $ref: "./templates/create_task_template.yml" - - name: list_tasks - description: Set page_size as 1 - endpoint: /tasks - operation: GET - query_parameters: - - view: MINIMAL - - page_size: 1 - filter: - - path: $response.tasks - type: array - size: 1 - response: - 200: {} - - name: list_tasks - description: Set page_size as 2 - endpoint: /tasks - operation: GET - query_parameters: - - view: MINIMAL - - page_size: 2 - filter: - - path: $response.tasks - type: array - size: 2 - response: - 200: {} diff --git a/tests/list_tasks_page_token.yml b/tests/list_tasks_page_token.yml deleted file mode 100644 index dfab396..0000000 --- a/tests/list_tasks_page_token.yml +++ /dev/null @@ -1,38 +0,0 @@ -description: Job to validate page token in list tasks -service: TES -versions: - - 1.0.0 - - 1.1.0 -tags: [] -jobs: - - $ref: "./templates/create_task_template.yml" - - $ref: "./templates/create_task_template.yml" - - name: list_tasks - description: Store the next_page_token - endpoint: /tasks - operation: GET - query_parameters: - - view: MINIMAL - - page_size: 1 - filter: - - path: $response.tasks - type: array - size: 1 - storage_vars: - next_page_token: $response.next_page_token - response: - 200: {} - - name: list_tasks - description: Fetch response after sending stored next_page_token value - endpoint: /tasks - operation: GET - 
query_parameters: - - view: MINIMAL - - page_size: 1 - - page_token: "{next_page_token}" - filter: - - path: $response.tasks - type: array - size: 1 - response: - 200: {} diff --git a/tests/service_info.yml b/tests/service_info.yml deleted file mode 100644 index d28761c..0000000 --- a/tests/service_info.yml +++ /dev/null @@ -1,17 +0,0 @@ -description: Job to retrieve the service info -service: TES -versions: - - 1.0.0 - - 1.1.0 -tags: - - schema_validation_only -jobs: - - name: service_info - description: Retrieve the TES server info - endpoint: /service-info - operation: GET - response: - 200: | - { - "id": "uk.ac.ebi.tsc.tesk" - } \ No newline at end of file diff --git a/unittests/data/run_job_tests/fail_service_info.yml b/unittests/data/run_job_tests/fail_service_info.yml index 2c0bea0..b0249bc 100644 --- a/unittests/data/run_job_tests/fail_service_info.yml +++ b/unittests/data/run_job_tests/fail_service_info.yml @@ -1,16 +1,15 @@ -description: Job to retrieve the service info -service: TES +description: test +service: test versions: - 1.0.0 tags: - - schema_validation_only + - test jobs: - - name: service_info - description: Retrieve the TES server info - endpoint: /service-info + - name: test + description: test + endpoint: test operation: GET response: 200: | { - "id": "uk.ac.ebi.tsc.tesk" } \ No newline at end of file diff --git a/unittests/data/run_job_tests/invalid_yaml.yml b/unittests/data/run_job_tests/invalid_yaml.yml index 91d91f4..59ab1f9 100644 --- a/unittests/data/run_job_tests/invalid_yaml.yml +++ b/unittests/data/run_job_tests/invalid_yaml.yml @@ -1,17 +1,16 @@ Invalid YAML -description: Job to retrieve the service info -service: TES +description: test +service: test versions: - 1.0.0 tags: - - schema_validation_only + - test jobs: - - name: service_info - description: Retrieve the TES server info - endpoint: /service-info + - name: test + description: test + endpoint: test operation: GET response: 200: | { - "id": "uk.ac.ebi.tsc.tesk" } \ No 
newline at end of file diff --git a/unittests/data/run_job_tests/skip_01.yml b/unittests/data/run_job_tests/skip_01.yml index 2c875c8..6814c75 100644 --- a/unittests/data/run_job_tests/skip_01.yml +++ b/unittests/data/run_job_tests/skip_01.yml @@ -1,15 +1,14 @@ -description: Job to retrieve the service info -service: TES +description: test +service: test versions: - 0.0.0 tags: [] jobs: - - name: service_info - description: Retrieve the TES server info - endpoint: /service-info + - name: test + description: test + endpoint: test operation: GET response: 200: | { - "id": "uk.ac.ebi.tsc.tesk" } \ No newline at end of file diff --git a/unittests/data/run_job_tests/success_01.yml b/unittests/data/run_job_tests/success_01.yml index 01f690f..34c56d9 100644 --- a/unittests/data/run_job_tests/success_01.yml +++ b/unittests/data/run_job_tests/success_01.yml @@ -1,19 +1,18 @@ -description: Job to retrieve the service info -service: TES +description: test +service: test versions: - 1.0.0 tags: - - schema_validation_only + - test jobs: - - name: service_info - description: Retrieve the TES server info - endpoint: /service-info + - name: test + description: test + endpoint: test operation: GET response: 200: | { - "id": "uk.ac.ebi.tsc.tesk" } - $ref: "unittests/data/templates/success_template.yml" args: - view_value: "MINIMAL" \ No newline at end of file + test: "test" \ No newline at end of file diff --git a/unittests/data/templates/success_template.yml b/unittests/data/templates/success_template.yml index f13b94f..0d8824e 100644 --- a/unittests/data/templates/success_template.yml +++ b/unittests/data/templates/success_template.yml @@ -1,8 +1,8 @@ -- name: list_tasks - description: Retrieve the list of tasks tracked by the TES server - endpoint: /tasks +- name: test + description: test + endpoint: test operation: GET query_parameters: - - view: "{view_value}" + - test: "test" response: 200: {} \ No newline at end of file diff --git 
a/unittests/data/test_config/template_schema.json b/unittests/data/test_config/template_schema.json new file mode 100644 index 0000000..9e26dfe --- /dev/null +++ b/unittests/data/test_config/template_schema.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/unittests/data/test_config/test_schema.json b/unittests/data/test_config/test_schema.json new file mode 100644 index 0000000..9e26dfe --- /dev/null +++ b/unittests/data/test_config/test_schema.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/unittests/data/tests/wrong_schema_yaml.yml b/unittests/data/tests/wrong_schema_yaml.yml index 514094a..9a994b7 100644 --- a/unittests/data/tests/wrong_schema_yaml.yml +++ b/unittests/data/tests/wrong_schema_yaml.yml @@ -1,15 +1,14 @@ -description: Job to retrieve the service info +description: test versions: - 1.0.0 tags: - - schema_validation_only + - test jobs: - - name: service_info - description: Retrieve the TES server info - endpoint: /service-info + - name: test + description: test + endpoint: test operation: GET response: 200: | { - "id": "uk.ac.ebi.tsc.tesk" } \ No newline at end of file diff --git a/unittests/test_cli.py b/unittests/test_cli.py index c42b095..54f33e6 100644 --- a/unittests/test_cli.py +++ b/unittests/test_cli.py @@ -35,7 +35,7 @@ def test_report_no_tag(self, mock_run_jobs, mock_generate_reports): mock_run_jobs.return_value = {} mock_generate_reports.return_value = '{"test": "test"}' runner = CliRunner() - result = runner.invoke(report, ['--server', TEST_URL, '--version', '1.0.0']) + result = runner.invoke(report, ['--server', TEST_URL, '--version', '1.0.0', '--test-path', 'unittests']) assert result.exit_code == 0 @patch.object(ReportServer, 'serve_thread') @@ -51,14 +51,15 @@ def test_report(self, mock_run_jobs, mock_generate_reports, mock_report_server): runner = CliRunner() result = runner.invoke(report, ['--server', TEST_URL, '--version', '1.0.0', '--include-tags', 'test', '--output_path', "path/to/output", 
'--serve', '--port', 9090, - '--uptime', 1000]) + '--uptime', 1000, '--test-path', 'unittests']) assert result.exit_code == 0 def test_validate_regex_failure(self): """Asserts if the application raises CLI error if invalid regex is provided for tags""" runner = CliRunner() - result = runner.invoke(report, ['--server', TEST_URL, '--version', '1.0.0', '--exclude-tags', '%%INVALID%%']) + result = runner.invoke(report, ['--server', TEST_URL, '--version', '1.0.0', '--exclude-tags', '%%INVALID%%', + '--test-path', 'unittests']) assert result.exit_code == 2 assert "Only letters (a-z, A-Z), digits (0-9) and underscores (_) are allowed." in result.output diff --git a/unittests/test_functions.py b/unittests/test_functions.py index 358f38e..136ea67 100644 --- a/unittests/test_functions.py +++ b/unittests/test_functions.py @@ -3,13 +3,13 @@ This module is to test the project functions """ -import unittest from unittest.mock import ( MagicMock, patch ) import polling2 +import pytest from requests.models import Response from compliance_suite.exceptions.compliance_exception import ( @@ -20,7 +20,15 @@ from compliance_suite.functions.report import Report -class TestFunctions(unittest.TestCase): +class TestFunctions: + + @pytest.fixture + def client(self): + """Pytest fixture for client""" + + with patch("importlib.import_module"): + client = Client() + yield client def test_report_generate(self): """Assert generate report method to be successful""" @@ -31,46 +39,42 @@ def test_report_generate(self): assert True @patch('requests.get') - def test_send_request_get(self, mock_get): + def test_send_request_get(self, mock_get, client): """ Asserts the Get endpoint response status to be 200""" mock_get.return_value = MagicMock(status_code=200) - client = Client() - get_response = client.send_request(service="TES", server="test-server", version="test-version", + get_response = client.send_request(server="test-server", version="test-version", endpoint="test-endpoint", path_params={"test": 
"test"}, query_params={"test": "test"}, operation="GET", request_body="") assert get_response.status_code == 200 - def test_send_request_get_failure(self): + def test_send_request_get_failure(self, client): """ Asserts the Get endpoint to throw Connection error due to invalid server URL""" - client = Client() - with self.assertRaises(TestRunnerException): - client.send_request(service="TES", server="test-server", version="test-version", + with pytest.raises(TestRunnerException): + client.send_request(server="test-server", version="test-version", endpoint="test-endpoint", path_params={}, query_params={}, operation="GET", request_body="") @patch('requests.post') - def test_send_request_post(self, mock_post): + def test_send_request_post(self, mock_post, client): """ Asserts the Post endpoint response status to be 200""" mock_post.return_value = MagicMock(status_code=200) - client = Client() - response = client.send_request(service="TES", server="test-server", version="test-version", + response = client.send_request(server="test-server", version="test-version", endpoint="test-endpoint", path_params={}, query_params={}, operation="POST", request_body="{}") assert response.status_code == 200 @patch('polling2.poll') - def test_polling_request_success(self, mock_get): + def test_polling_request_success(self, mock_get, client): """ Asserts the polling response status to be 200""" mock_get.return_value = MagicMock(status_code=200) - client = Client() - get_response = client.poll_request(service="TES", server="test-server", version="test-version", + get_response = client.poll_request(server="test-server", version="test-version", endpoint="test-endpoint", path_params={"test": "test"}, query_params={"test": "test"}, operation="test", polling_interval=10, polling_timeout=3600, @@ -78,68 +82,61 @@ def test_polling_request_success(self, mock_get): assert get_response.status_code == 200 @patch('polling2.poll') - def test_polling_request_timeout(self, mock_get): + def 
test_polling_request_timeout(self, mock_get, client): """ Asserts the polling request to throw Timeout Exception""" mock_get.side_effect = polling2.TimeoutException(MagicMock()) - client = Client() - with self.assertRaises(TestFailureException): - client.poll_request(service="TES", server="test-server", version="test-version", + with pytest.raises(TestFailureException): + client.poll_request(server="test-server", version="test-version", endpoint="test-endpoint", path_params={"test": "test"}, query_params={"test": "test"}, operation="test", polling_interval=10, polling_timeout=5, check_cancel_val=False) - def test_polling_request_failure(self): - """ Asserts the polling request to throw Timeout Exception""" + def test_polling_request_failure(self, client): + """ Asserts the polling request to throw OSError""" - client = Client() - with self.assertRaises(TestRunnerException): - client.poll_request(service="TES", server="invalid-url", version="test-version", + with pytest.raises(TestRunnerException): + client.poll_request(server="invalid-url", version="test-version", endpoint="test-endpoint", path_params={"test": "test"}, query_params={"test": "test"}, operation="test", polling_interval=10, polling_timeout=3600, check_cancel_val=False) - def test_check_poll_create(self): + def test_check_poll_create(self, client): """ Asserts the check poll function to be True for status code 200 and COMPLETE state""" - client = Client() resp = MagicMock(status_code=200) resp.json.return_value = {"state": "COMPLETE"} assert client.check_poll(resp) is True - def test_check_poll_cancel(self): + def test_check_poll_cancel(self, client): """ Asserts the check poll function to be True for status code 200 and CANCELED state""" - client = Client() client.check_cancel = True resp = MagicMock(status_code=200) resp.json.return_value = {"state": "CANCELED"} assert client.check_poll(resp) is True - def test_check_poll_canceling(self): + def test_check_poll_canceling(self, client): """ Asserts the 
check poll function to be True for status code 200 and CANCELING state""" - client = Client() client.check_cancel = True resp = MagicMock(status_code=200) resp.json.return_value = {"state": "CANCELING"} assert client.check_poll(resp) is True - def test_check_poll_fail(self): + def test_check_poll_fail(self, client): """ Asserts the check poll function to be False for status code not equal to 200""" - client = Client() resp = Response resp.status_code = 400 assert client.check_poll(resp) is False - def test_check_poll_retry(self): + def test_check_poll_retry(self, client): """ Asserts the check poll function to be False and retry for status code 200 and RANDOM state""" - client = Client() resp = MagicMock(status_code=200) resp.json.return_value = {"state": "RANDOM"} diff --git a/unittests/test_job_runner.py b/unittests/test_job_runner.py index 05a8625..0e62750 100644 --- a/unittests/test_job_runner.py +++ b/unittests/test_job_runner.py @@ -8,7 +8,9 @@ MagicMock, patch ) +import yaml +from jsonschema import ValidationError import pytest from compliance_suite.exceptions.compliance_exception import ( @@ -27,6 +29,9 @@ YAML_TEST_PATH_SKIP = Path("unittests/data/run_job_tests/skip_01.yml") YAML_TEST_PATH_FAIL = Path("unittests/data/run_job_tests/fail_service_info.yml") YAML_WRONG_SCHEMA = Path("unittests/data/tests/wrong_schema_yaml.yml") +TEST_CONFIG_DIR = Path("unittests/data/test_config") +TEST_SCHEMA = Path("unittests/data/test_config/test_schema.json") +TEMPLATE_SCHEMA = Path("unittests/data/test_config/template_schema.json") class TestJobRunner: @@ -45,23 +50,34 @@ def test_generate_report(self): job_runner_object.generate_report() assert True - def test_load_and_validate_yaml_data_test(self): + @patch("compliance_suite.job_runner.Path") + def test_load_and_validate_yaml_data_test(self, mock_path): """ Asserts validate job functions for proper YAML schema""" + mock_path.side_effect = [TEST_CONFIG_DIR, TEST_SCHEMA, TEMPLATE_SCHEMA] + job_runner_object = 
JobRunner(TEST_URL, "1.0.0") yaml_data = job_runner_object.load_and_validate_yaml_data(str(YAML_TEST_PATH_SUCCESS), "Test") assert "service" in yaml_data - def test_load_and_validate_yaml_data_template(self): + @patch("compliance_suite.job_runner.Path") + def test_load_and_validate_yaml_data_template(self, mock_path): """ Asserts validate job functions for proper YAML schema""" + mock_path.side_effect = [TEST_CONFIG_DIR, TEST_SCHEMA, TEMPLATE_SCHEMA] + job_runner_object = JobRunner(TEST_URL, "1.0.0") yaml_data = job_runner_object.load_and_validate_yaml_data(str(YAML_TEMPLATE_PATH_SUCCESS), "Template") assert "endpoint" in yaml_data[0] - def test_load_and_validate_yaml_data_failure(self): + @patch("compliance_suite.job_runner.validate") + @patch("compliance_suite.job_runner.Path") + def test_load_and_validate_yaml_data_failure(self, mock_path, mock_validate): """ Asserts validate_job() function for incorrect YAML schema""" + mock_path.side_effect = [TEST_CONFIG_DIR, TEST_SCHEMA, TEMPLATE_SCHEMA] + mock_validate.side_effect = ValidationError("Error") + with pytest.raises(JobValidationException): job_runner_object = JobRunner(TEST_URL, "1.0.0") job_runner_object.load_and_validate_yaml_data(str(YAML_WRONG_SCHEMA), "Test") @@ -80,11 +96,16 @@ def test_run_jobs_dir(self, mock_initialize_test): job_runner_object.run_jobs() assert mock_initialize_test.call_count == len(list(Path("unittests/data/run_job_tests").glob("**/*.yml"))) + @patch('compliance_suite.job_runner.JobRunner.load_and_validate_yaml_data') @patch.object(TestRunner, 'run_tests') - def test_initialize_test(self, mock_run_tests): + def test_initialize_test(self, mock_run_tests, mock_fn): + job_runner_object = JobRunner(TEST_URL, "1.0.0") job_runner_object.set_report(Report()) + yaml_files = [YAML_TEST_PATH_SUCCESS, YAML_TEMPLATE_PATH_SUCCESS, YAML_TEST_PATH_SKIP, YAML_TEST_PATH_FAIL] + mock_fn.side_effect = [yaml.safe_load(open(file_path, "r")) for file_path in yaml_files] + 
job_runner_object.initialize_test(YAML_TEST_PATH_SUCCESS) assert len(job_runner_object.test_status["passed"]) == 1 diff --git a/unittests/test_test_runner.py b/unittests/test_test_runner.py index 3a4a2b6..46a46d2 100644 --- a/unittests/test_test_runner.py +++ b/unittests/test_test_runner.py @@ -8,6 +8,7 @@ patch ) +from pydantic import BaseModel import pytest from compliance_suite.exceptions.compliance_exception import ( @@ -16,11 +17,7 @@ ) from compliance_suite.functions.client import Client from compliance_suite.test_runner import TestRunner -from unittests.data.constants import ( - TEST_SERVICE, - TEST_URL, - TEST_VERSIONS -) +from unittests.data.constants import TEST_URL class TestTestRunner: @@ -29,21 +26,23 @@ class TestTestRunner: def default_test_runner(self): """Pytest fixture for default test runner with required job fields""" - test_runner = TestRunner(TEST_SERVICE, TEST_URL, "x.y.z") + test_runner = TestRunner(TEST_URL, "x.y.z") test_runner.report_test = MagicMock() test_runner.job_data = { "name": "test", + "description": "test", "operation": "test", "endpoint": "test", "response": {"200": ""} } return test_runner - @pytest.mark.parametrize("version", TEST_VERSIONS) # Use parameterized versions once to cover all models - def test_validate_logic_success(self, version): + @patch("compliance_suite.test_runner.getattr") + @patch("importlib.import_module") + def test_validate_logic_success(self, mock_module, mock_getattr): """ Asserts validate_logic() function for successful schema validation to API Model""" - test_runner = TestRunner(TEST_SERVICE, TEST_URL, version) + test_runner = TestRunner(TEST_URL, "1.0.0") test_runner.set_job_data( { "operation": "test", @@ -66,156 +65,83 @@ def test_validate_logic_success(self, version): assert test_runner.validate_logic("service_info", service_info_response, "Response") is None - def test_validate_logic_failure(self): + @patch("compliance_suite.test_runner.getattr") + @patch("importlib.import_module") + def 
test_validate_logic_failure(self, mock_module, mock_getattr, default_test_runner): """ Asserts validate_logic() function for unsuccessful schema validation to API Model""" - test_runner = TestRunner(TEST_SERVICE, TEST_URL, "1.0.0") - test_runner.set_job_data( - { - "operation": "test", - "endpoint": "test" - } - ) - test_runner.report_test = MagicMock() + class TestPydanticClass(BaseModel): + field1: str + field2: str + field3: str + + mock_getattr.return_value = TestPydanticClass + with pytest.raises(TestFailureException): - test_runner.validate_logic("service_info", {}, "Response") + default_test_runner.validate_logic("service_info", {}, "Response") @patch.object(TestRunner, "validate_logic") - def test_validate_request_body_success(self, mock_validate_job): + def test_validate_request_body_success(self, mock_validate_job, default_test_runner): """ Asserts validate_request_body() function for successful JSON format and schema validation to API Model""" - mock_validate_job.return_value = {} - - test_runner = TestRunner(TEST_SERVICE, TEST_URL, "1.0.0") - test_runner.set_job_data( - { - "name": "test", - "operation": "test", - "endpoint": "test" - } - ) - test_runner.report_test = MagicMock() - assert test_runner.validate_request_body("{}") is None + assert default_test_runner.validate_request_body("{}") is None - def test_validate_request_body_failure(self): + def test_validate_request_body_failure(self, default_test_runner): """ Asserts validate_request_body() function for unsuccessful JSON format""" - test_runner = TestRunner(TEST_SERVICE, TEST_URL, "1.0.0") - test_runner.set_job_data( - { - "operation": "test", - "endpoint": "test" - } - ) - test_runner.report_test = MagicMock() with pytest.raises(JobValidationException): - test_runner.validate_request_body("{") + default_test_runner.validate_request_body("{") @patch.object(TestRunner, "validate_logic") - def test_validate_response_success_get(self, mock_validate_job): + def 
test_validate_response_custom_endpoint_model(self, mock_validate_job, default_test_runner): """ Asserts validate_response() function for successful response and schema validation to API Model""" - mock_validate_job.return_value = {} - - test_runner = TestRunner(TEST_SERVICE, TEST_URL, "1.0.0") - test_runner.set_job_data( - { - "name": "list_tasks", - "operation": "test", - "endpoint": "test", - "query_parameters": [{"view": "BASIC"}], - "response": {"200": ""} - } - ) - test_runner.report_test = MagicMock() + default_test_runner.job_data["name"] = "list_tasks" + default_test_runner.job_data["query_parameters"] = [{"view": "BASIC"}] - resp = MagicMock(status_code=200, text="") - assert test_runner.validate_response(resp) is None + resp = MagicMock(status_code=200) + assert default_test_runner.validate_response(resp) is None @patch.object(TestRunner, "validate_logic") - def test_validate_response_success(self, mock_validate_job): + def test_validate_response_empty(self, mock_validate_job, default_test_runner): """ Asserts validate_response() function for successful response and schema validation to API Model""" - mock_validate_job.return_value = {} - - test_runner = TestRunner(TEST_SERVICE, TEST_URL, "1.0.0") - test_runner.set_job_data( - { - "name": "test", - "operation": "test", - "endpoint": "test", - "response": {"200": ""} - } - ) - test_runner.report_test = MagicMock() - - resp = MagicMock(status_code=200) - assert test_runner.validate_response(resp) is None + resp = MagicMock(status_code=200, text="") + assert default_test_runner.validate_response(resp) is None - def test_validate_response_failure(self): + def test_validate_response_failure(self, default_test_runner): """ Asserts validate_response() function for unsuccessful response""" - test_runner = TestRunner(TEST_SERVICE, TEST_URL, "1.0.0") - test_runner.set_job_data( - { - "operation": "test", - "endpoint": "test", - "response": {"200": ""} - } - ) - test_runner.report_test = MagicMock() - resp = 
MagicMock(status_code=400) with pytest.raises(TestFailureException): - test_runner.validate_response(resp) + default_test_runner.validate_response(resp) - @patch.object(Client, "poll_request") + @patch("importlib.import_module") + @patch.object(Client, "send_request") + @patch.object(TestRunner, "validate_request_body") @patch.object(TestRunner, "validate_response") - def test_run_jobs_get_task(self, mock_validate_response, mock_client): - """Assert the run job method for get task to be successful""" + def test_run_tests(self, mock_validate_response, mock_validate_request_body, + mock_client, mock_module, default_test_runner): - mock_validate_response.return_value = {} - mock_client.return_value = MagicMock() + default_test_runner.job_data["path_parameters"] = {"test": "test"} + default_test_runner.job_data["query_parameters"] = [{"test": "test"}] + default_test_runner.job_data["request_body"] = '{ "test": "test" }' + assert default_test_runner.run_tests(default_test_runner.job_data, MagicMock()) is None - test_runner = TestRunner(TEST_SERVICE, TEST_URL, "1.0.0") - job_data = { - "name": "get_task", - "description": "test", - "operation": "test", - "endpoint": "test", - "query_parameters": [{"view": "BASIC"}], - "polling": {"interval": 10, "timeout": 10}, - "env_vars": { - "check_cancel": "True" - } - } - test_runner.set_auxiliary_space("id", "1234") - assert test_runner.run_tests(job_data, MagicMock()) is None + @patch("importlib.import_module") + @patch.object(Client, "poll_request") + @patch.object(TestRunner, "validate_response") + def test_run_tests_polling_request(self, mock_validate_response, + mock_client, mock_module, default_test_runner): - @patch.object(Client, "send_request") - @patch.object(TestRunner, "validate_request_body") - @patch.object(TestRunner, "validate_logic") - def test_run_jobs_create_task(self, mock_validate_logic, mock_validate_request_body, mock_client): - """Assert the run job method for create task to be successful""" + 
default_test_runner.job_data["polling"] = {"interval": 10, "timeout": 10} + default_test_runner.job_data["env_vars"] = {"check_cancel": "True"} + assert default_test_runner.run_tests(default_test_runner.job_data, MagicMock()) is None - mock_validate_logic.return_value = {} - mock_validate_request_body.return_value = {} - resp = MagicMock(status_code=200, text='{"id": "1234"}') - mock_client.return_value = resp + def test_save_storage_vars(self, default_test_runner): - test_runner = TestRunner(TEST_SERVICE, TEST_URL, "1.0.0") - job_data = { - "name": "create_task", - "description": "test", - "operation": "test", - "endpoint": "test", - "request_body": "{}", - "storage_vars": { - "id": "$response.id" - }, - "response": {"200": ""} - } - assert test_runner.run_tests(job_data, MagicMock()) is None + default_test_runner.job_data["storage_vars"] = {"id": "$response.id"} + assert default_test_runner.save_storage_vars(default_test_runner.job_data) is None def test_validate_filters_string_success(self, default_test_runner): """Assert validate filters to be successful for string type"""