diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml
new file mode 100644
index 0000000..7c598af
--- /dev/null
+++ b/.github/workflows/run-tests.yml
@@ -0,0 +1,49 @@
+# This workflow will install Python dependencies and run tests with PyTest using Python 3.8
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: Run tests
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    steps:
+      # Checkout repository
+      - uses: actions/checkout@v3
+
+      # Set Python version
+      - name: Set up Python 3.8
+        uses: actions/setup-python@v4
+        with:
+          python-version: 3.8
+
+      # Set up submodules and submodule dependencies
+      # TODO: Uncomment when there are submodules
+      # - name: Set up submodule and submodule dependencies
+      #   run: |
+      #     git submodule update --init --recursive --remote
+      #     pip install -r ./modules/common/requirements.txt
+
+      # Install project dependencies
+      - name: Install project dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+
+      # Run linters and formatters
+      - name: Linters and formatters
+        run: |
+          black --check .
+          flake8 .
+          pylint .
+
+      # Run unit tests with PyTest
+      # TODO: Uncomment when there are unit tests to run
+      # - name: Run unit tests
+      #   run: pytest -vv
diff --git a/.gitignore b/.gitignore
index b9b62af..19bf3cd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,4 @@
-# IDE
+# IDEs
 .idea/
 .vscode/
 
@@ -7,4 +7,7 @@ __pycache__/
 venv/
 
 # Logging
-*log*
+logs/
+
+# Testing
+test_images/
diff --git a/avif_benchmark.py b/avif_benchmark.py
index 8586a51..4855687 100644
--- a/avif_benchmark.py
+++ b/avif_benchmark.py
@@ -6,6 +6,7 @@
 as well as a .json with the test data and
 a .csv which provides a more human-friendly summary of the data.
 """
+
 import gc
 import io
 import json
@@ -21,7 +22,7 @@
 FRAME_COUNT = 300  # Total number of frames
 FRAME_TO_SAVE = 69  # This frame is good, it has both landing pads in it
 INPUT_PATH = pathlib.Path("test_images", "Encode Test Dataset 2024")
-OUTPUT_PATH = pathlib.Path(f"log_{int(time.time())}")
+OUTPUT_PATH = pathlib.Path("logs", str(int(time.time())))
 # All the quality settings to test (-1 should represent 'lossless',
 # although it is only lossless in case of 444 subsampling)
 QUALITY_SETTINGS = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
@@ -44,7 +45,7 @@
     "Quality",
     "Chroma",
     "Min Time (ms)",
-    "Max Time (ms)", 
+    "Max Time (ms)",
     "Avg Time (ms)",
     "Min Size (B)",
     "Max Size (B)",
@@ -56,9 +57,11 @@
 HEADER_LINE = ",".join(HEADERS) + "\n"
 
 
-def update_min_max(min_value: "int | float",
-                   max_value: "int | float",
-                   current_value: "int | float",) -> "tuple[int, int] | tuple[float, float]":
+def update_min_max(
+    min_value: "int | float",
+    max_value: "int | float",
+    current_value: "int | float",
+) -> "tuple[int, int] | tuple[float, float]":
     """
     Updates the min and max values for a measurement.
 
@@ -70,7 +73,7 @@ def update_min_max(min_value: "int | float",
     Returns: (min_value, max_value)
         min_value: new updated minimum recorded value
         max_value: new updated maximum recorded value
-    
+
     The intended output is something like [int, int] or [float, float],
     but it is not guaranteed because the inputs could be a combination of int and float.
     eg. could also be tuple[float, int]
@@ -83,7 +86,10 @@ def update_min_max(min_value: "int | float",
     return min_value, max_value
 
 
-def run():
+def main() -> int:
+    """
+    Main function.
+ """ pillow_heif.register_avif_opener(thumbnails=False) OUTPUT_PATH.mkdir(parents=True, exist_ok=True) @@ -102,14 +108,15 @@ def run(): MAX_SIZE_RATIO_COMPRESSED_TO_ORIGINAL: 0, AVG_SIZE_RATIO_COMPRESSED_TO_ORIGINAL: 0, FRAME_DATA: [], - } for chroma in CHROMA_SETTINGS - } for quality in QUALITY_SETTINGS + } + for chroma in CHROMA_SETTINGS + } + for quality in QUALITY_SETTINGS } test_begin = time.time() print("Start time:", test_begin) - for quality in QUALITY_SETTINGS: print(f"-----------------QUALITY = {quality}--------------------") for chroma in CHROMA_SETTINGS: @@ -145,9 +152,10 @@ def run(): # Save singular test results time_ns = end - start size_B = buffer.getbuffer().nbytes - compression_ratio = 100 * size_B / os.path.getsize( + original_size_B = os.path.getsize( pathlib.Path(INPUT_PATH, f"{frame_index}.png"), ) + compression_ratio = 100 * size_B / original_size_B min_time_ns, max_time_ns = update_min_max(min_time_ns, max_time_ns, time_ns) min_size_B, max_size_B = update_min_max(min_size_B, max_size_B, size_B) @@ -163,7 +171,7 @@ def run(): test_result = { "time_ns": time_ns, "size_B": size_B, - "size_ratio_compressed_to_original_%": compression_ratio + "size_ratio_compressed_to_original_%": compression_ratio, } current_result[FRAME_DATA].append(test_result) @@ -185,8 +193,9 @@ def run(): current_result[AVG_SIZE_B] = total_size_B / FRAME_COUNT current_result[MIN_SIZE_RATIO_COMPRESSED_TO_ORIGINAL] = min_compression_ratio current_result[MAX_SIZE_RATIO_COMPRESSED_TO_ORIGINAL] = max_compression_ratio - current_result[AVG_SIZE_RATIO_COMPRESSED_TO_ORIGINAL] = \ + current_result[AVG_SIZE_RATIO_COMPRESSED_TO_ORIGINAL] = ( total_compression_ratio / FRAME_COUNT + ) print(f"chroma {chroma} completed") print("") @@ -194,11 +203,11 @@ def run(): print("") # Saving full results - with open(pathlib.Path(OUTPUT_PATH, "results.json"), 'w', encoding="utf-8") as file: + with open(pathlib.Path(OUTPUT_PATH, "results.json"), "w", encoding="utf-8") as file: file.write(json.dumps(results, indent=2)) # Saving shortcut results without frame data (for more human readability) - with open(pathlib.Path(OUTPUT_PATH, "summary.csv"), 'w', encoding="utf-8") as file: + with open(pathlib.Path(OUTPUT_PATH, "summary.csv"), "w", encoding="utf-8") as file: file.write(HEADER_LINE) for quality in QUALITY_SETTINGS: for chroma in CHROMA_SETTINGS: @@ -229,6 +238,12 @@ def run(): "secs", ) + return 0 + if __name__ == "__main__": - run() + result_main = main() + if result_main < 0: + print(f"ERROR: Status code: {result_main}") + + print("Done!") diff --git a/heif_benchmark.py b/heif_benchmark.py index cb2bc8e..da7c1b5 100644 --- a/heif_benchmark.py +++ b/heif_benchmark.py @@ -6,6 +6,7 @@ as well as a .json with the test data and a .csv which provides a more human-friendly summary of the data. 
""" + import gc import io import json @@ -21,7 +22,7 @@ FRAME_COUNT = 300 # Total number of frames FRAME_TO_SAVE = 69 # This frame is good, it has both landing pads in it INPUT_PATH = pathlib.Path("test_images", "Encode Test Dataset 2024") -OUTPUT_PATH = pathlib.Path(f"log_{int(time.time())}") +OUTPUT_PATH = pathlib.Path("logs", str(int(time.time()))) # All the quality settings to test (-1 should represent 'lossless', # although it is only lossless in case of 444 subsampling) @@ -45,7 +46,7 @@ "Quality", "Chroma", "Min Time (ms)", - "Max Time (ms)", + "Max Time (ms)", "Avg Time (ms)", "Min Size (B)", "Max Size (B)", @@ -57,9 +58,11 @@ HEADER_LINE = ",".join(HEADERS) + "\n" -def update_min_max(min_value: "int | float", - max_value: "int | float", - current_value: "int | float",) -> "tuple[int, int] | tuple[float, float]": +def update_min_max( + min_value: "int | float", + max_value: "int | float", + current_value: "int | float", +) -> "tuple[int, int] | tuple[float, float]": """ Updates the min and max values for a measurement. @@ -71,7 +74,7 @@ def update_min_max(min_value: "int | float", Returns: (min_value, max_value) min_value: new updated minimum recorded value max_value: new updated maximum recorded value - + The intended output is something like [int, int] or [float, float], but it is not guaranteed because the inputs could be a combination of int and float. eg. could also be tuple[float, int] @@ -84,7 +87,10 @@ def update_min_max(min_value: "int | float", return min_value, max_value -def run(): +def main() -> int: + """ + Main function. + """ register_heif_opener(thumbnails=False) OUTPUT_PATH.mkdir(parents=True, exist_ok=True) @@ -103,14 +109,15 @@ def run(): MAX_SIZE_RATIO_COMPRESSED_TO_ORIGINAL: 0, AVG_SIZE_RATIO_COMPRESSED_TO_ORIGINAL: 0, FRAME_DATA: [], - } for chroma in CHROMA_SETTINGS - } for quality in QUALITY_SETTINGS + } + for chroma in CHROMA_SETTINGS + } + for quality in QUALITY_SETTINGS } test_begin = time.time() print("Start time:", test_begin) - for quality in QUALITY_SETTINGS: print(f"-----------------QUALITY = {quality}--------------------") for chroma in CHROMA_SETTINGS: @@ -123,8 +130,7 @@ def run(): min_compression_ratio = float("inf") max_compression_ratio = 0 total_compression_ratio = 0 - current_result = \ - results[f"quality_{quality}"][f"chroma_{chroma}"] + current_result = results[f"quality_{quality}"][f"chroma_{chroma}"] for frame_index in range(FRAME_COUNT): img = Image.open(pathlib.Path(INPUT_PATH, f"{frame_index}.png")) buffer = io.BytesIO() @@ -144,9 +150,10 @@ def run(): # Save singular test results time_ns = end - start size_B = buffer.getbuffer().nbytes - compression_ratio = 100 * size_B / os.path.getsize( + original_size_B = os.path.getsize( pathlib.Path(INPUT_PATH, f"{frame_index}.png"), ) + compression_ratio = 100 * size_B / original_size_B min_time_ns, max_time_ns = update_min_max(min_time_ns, max_time_ns, time_ns) min_size_B, max_size_B = update_min_max(min_size_B, max_size_B, size_B) @@ -162,7 +169,7 @@ def run(): test_result = { "time_ns": time_ns, "size_B": size_B, - "size_ratio_compressed_to_original_%": compression_ratio + "size_ratio_compressed_to_original_%": compression_ratio, } current_result[FRAME_DATA].append(test_result) @@ -184,8 +191,9 @@ def run(): current_result[AVG_SIZE_B] = total_size_B / FRAME_COUNT current_result[MIN_SIZE_RATIO_COMPRESSED_TO_ORIGINAL] = min_compression_ratio current_result[MAX_SIZE_RATIO_COMPRESSED_TO_ORIGINAL] = max_compression_ratio - current_result[AVG_SIZE_RATIO_COMPRESSED_TO_ORIGINAL] = \ + 
+            current_result[AVG_SIZE_RATIO_COMPRESSED_TO_ORIGINAL] = (
                 total_compression_ratio / FRAME_COUNT
+            )
 
             print(f"chroma {chroma} complete")
             print("")
@@ -193,16 +201,15 @@
     print("")
 
     # Saving full results
-    with open(pathlib.Path(OUTPUT_PATH, "results.json"), 'w', encoding="utf-8") as file:
+    with open(pathlib.Path(OUTPUT_PATH, "results.json"), "w", encoding="utf-8") as file:
         file.write(json.dumps(results, indent=2))
 
     # Saving shortcut results without frame data (for more human readability)
-    with open(pathlib.Path(OUTPUT_PATH, "summary.csv"), 'w', encoding="utf-8") as file:
+    with open(pathlib.Path(OUTPUT_PATH, "summary.csv"), "w", encoding="utf-8") as file:
         file.write(HEADER_LINE)
         for quality in QUALITY_SETTINGS:
             for chroma in CHROMA_SETTINGS:
-                current_result = \
-                    results[f"quality_{quality}"][f"chroma_{chroma}"]
+                current_result = results[f"quality_{quality}"][f"chroma_{chroma}"]
                 line_stats = [
                     str(quality),
                     str(chroma),
@@ -229,5 +236,12 @@
         "secs",
     )
 
+    return 0
+
+
 if __name__ == "__main__":
-    run()
+    result_main = main()
+    if result_main < 0:
+        print(f"ERROR: Status code: {result_main}")
+
+    print("Done!")
diff --git a/jpeg_benchmark.py b/jpeg_benchmark.py
index 66bba8a..6b544d3 100644
--- a/jpeg_benchmark.py
+++ b/jpeg_benchmark.py
@@ -1,12 +1,17 @@
+"""
+Benchmarks JPEG encoding.
+"""
+
 import gc
 import io
 import json
 import os
 import pathlib
 import time
-import datetime
+
 from PIL import Image
+
 # Setting parameters
 FRAME_COUNT = 300
 FRAME_TO_SAVE = 69
 
@@ -31,7 +36,7 @@
 HEADERS = [
     "Quality",
     "Min Time (ms)",
-    "Max Time (ms)", 
+    "Max Time (ms)",
     "Avg Time (ms)",
     "Min Size (B)",
     "Max Size (B)",
@@ -42,9 +47,12 @@
 ]
 HEADER_LINE = ",".join(HEADERS) + "\n"
 
-def update_min_max(min_value: "int | float",
-                   max_value: "int | float",
-                   current_value: "int | float",) -> "tuple[int, int] | tuple[float, float]":
+
+def update_min_max(
+    min_value: "int | float",
+    max_value: "int | float",
+    current_value: "int | float",
+) -> "tuple[int, int] | tuple[float, float]":
     """
     Updates the min and max values for a measurement.
 
@@ -56,7 +64,7 @@ def update_min_max(min_value: "int | float",
     Returns: (min_value, max_value)
         min_value: new updated minimum recorded value
         max_value: new updated maximum recorded value
-    
+
     The intended output is something like [int, int] or [float, float],
     but it is not guaranteed because the inputs could be a combination of int and float.
     eg. could also be tuple[float, int]
@@ -68,7 +76,11 @@ def update_min_max(min_value: "int | float",
     return min_value, max_value
 
-def run():
+
+def main() -> int:
+    """
+    Main function.
+ """ OUTPUT_PATH.mkdir(parents=True, exist_ok=True) # Set up results dictionary @@ -84,8 +96,9 @@ def run(): MIN_SIZE_RATIO_COMPRESSED_TO_ORIGINAL: 0, MAX_SIZE_RATIO_COMPRESSED_TO_ORIGINAL: 0, AVG_SIZE_RATIO_COMPRESSED_TO_ORIGINAL: 0, - FRAME_DATA: [], - } for quality in QUALITY_SETTINGS + FRAME_DATA: [], + } + for quality in QUALITY_SETTINGS } test_begin = time.time() @@ -107,25 +120,22 @@ def run(): current_result = results[f"lossy_{quality}"] for frame_index in range(FRAME_COUNT): - image = Image.open(pathlib.Path(INPUT_PATH, f"{frame_index}.png")) # load one at a time + image = Image.open(pathlib.Path(INPUT_PATH, f"{frame_index}.png")) # load one at a time buffer = io.BytesIO() # Encode the frame with specified settings and time gc.disable() start = time.time_ns() - image.save( - buffer, - format="JPEG", - quality=quality - ) + image.save(buffer, format="JPEG", quality=quality) end = time.time_ns() gc.enable() time_ns = end - start size_B = buffer.getbuffer().nbytes - compression_ratio = 100 * size_B / os.path.getsize( - pathlib.Path(INPUT_PATH, f"{frame_index}.png") + original_size_B = os.path.getsize( + pathlib.Path(INPUT_PATH, f"{frame_index}.png"), ) + compression_ratio = 100 * size_B / original_size_B min_time_ns, max_time_ns = update_min_max(min_time_ns, max_time_ns, time_ns) min_size_B, max_size_B = update_min_max(min_size_B, max_size_B, size_B) @@ -141,17 +151,13 @@ def run(): test_result = { "time_ns": time_ns, "size_B": size_B, - "size_ratio_compressed_to_original_%": compression_ratio + "size_ratio_compressed_to_original_%": compression_ratio, } current_result[FRAME_DATA].append(test_result) # Save one image (this one has 2 landing pads in it) for reference if frame_index == FRAME_TO_SAVE: - image.save( - OUTPUT_PATH / f"q{quality}.jpeg", - format="JPEG", - quality=quality - ) + image.save(OUTPUT_PATH / f"q{quality}.jpeg", format="JPEG", quality=quality) # Save average test results current_result[MIN_TIME_MS] = min_time_ns / 1e6 @@ -162,20 +168,21 @@ def run(): current_result[AVG_SIZE_B] = total_size_B / FRAME_COUNT current_result[MIN_SIZE_RATIO_COMPRESSED_TO_ORIGINAL] = min_compression_ratio current_result[MAX_SIZE_RATIO_COMPRESSED_TO_ORIGINAL] = max_compression_ratio - current_result[AVG_SIZE_RATIO_COMPRESSED_TO_ORIGINAL] = \ + current_result[AVG_SIZE_RATIO_COMPRESSED_TO_ORIGINAL] = ( total_compression_ratio / FRAME_COUNT + ) print(f"Quality {quality} complete") - + print("") print("-------------------TEST COMPLETED------------------") print("") # Saving full results - with open(pathlib.Path(OUTPUT_PATH, "results.json"), 'w', encoding="utf-8") as file: + with open(pathlib.Path(OUTPUT_PATH, "results.json"), "w", encoding="utf-8") as file: file.write(json.dumps(results, indent=2)) # Saving shortcut results without frame data (for more human readability) - with open(pathlib.Path(OUTPUT_PATH, "summary.csv"), 'w', encoding="utf-8") as file: + with open(pathlib.Path(OUTPUT_PATH, "summary.csv"), "w", encoding="utf-8") as file: file.write(HEADER_LINE) for quality in QUALITY_SETTINGS: current_result = results[f"lossy_{quality}"] @@ -204,5 +211,12 @@ def run(): "secs", ) + return 0 + + if __name__ == "__main__": - run() + result_main = main() + if result_main < 0: + print(f"ERROR: Status code: {result_main}") + + print("Done!") diff --git a/png_benchmark.py b/png_benchmark.py index 649ff3e..5fa6c4c 100644 --- a/png_benchmark.py +++ b/png_benchmark.py @@ -6,6 +6,7 @@ as well as a .json with the test data and a .csv which provides a more human-friendly summary of the data. 
""" + import gc import io import json @@ -20,10 +21,11 @@ FRAME_COUNT = 300 # Total number of frames FRAME_TO_SAVE = 69 # This frame is good, it has both landing pads in it INPUT_PATH = pathlib.Path("test_images", "Encode Test Dataset 2024") -OUTPUT_PATH = pathlib.Path(f"log_{int(time.time())}") +OUTPUT_PATH = pathlib.Path("logs", str(int(time.time()))) COMPRESS_TYPES = [0, 1, 2, 3, 4] # There are 5 different compression algorithms, each are numbered -INITIAL_COMPRESS_LEVEL = 1 # Compress level 0 is skipped because it is uncompressed -MAX_COMPRESS_LEVEL = 6 # Although this maxes out at 9, it takes way too long (like 10s per image) +# Compress level 0 is skipped because it is uncompressed +# Although this maxes out at 9, it takes way too long (like 10s per image) +COMPRESS_LEVELS = [1, 2, 3, 4, 5, 6] # Keys for dictionary entries MAX_TIME_MS = "max_time_ms" @@ -42,7 +44,7 @@ "Compression Type", "Compression Level", "Min Time (ms)", - "Max Time (ms)", + "Max Time (ms)", "Avg Time (ms)", "Min Size (B)", "Max Size (B)", @@ -54,9 +56,11 @@ HEADER_LINE = ",".join(HEADERS) + "\n" -def update_min_max(min_value: "int | float", - max_value: "int | float", - current_value: "int | float",) -> "tuple[int, int] | tuple[float, float]": +def update_min_max( + min_value: "int | float", + max_value: "int | float", + current_value: "int | float", +) -> "tuple[int, int] | tuple[float, float]": """ Updates the min and max values for a measurement. @@ -68,7 +72,7 @@ def update_min_max(min_value: "int | float", Returns: (min_value, max_value) min_value: new updated minimum recorded value max_value: new updated maximum recorded value - + The intended output is something like [int, int] or [float, float], but it is not guaranteed because the inputs could be a combination of int and float. eg. could also be tuple[float, int] @@ -81,7 +85,10 @@ def update_min_max(min_value: "int | float", return min_value, max_value -def run(): +def main() -> int: + """ + Main function. 
+ """ OUTPUT_PATH.mkdir(parents=True, exist_ok=True) results = { @@ -98,17 +105,18 @@ def run(): MAX_SIZE_RATIO_COMPRESSED_TO_ORIGINAL: 0, AVG_SIZE_RATIO_COMPRESSED_TO_ORIGINAL: 0, FRAME_DATA: [], - } for compress_level in range(INITIAL_COMPRESS_LEVEL, MAX_COMPRESS_LEVEL + 1) - } for compress_type in COMPRESS_TYPES + } + for compress_level in COMPRESS_LEVELS + } + for compress_type in COMPRESS_TYPES } test_begin = time.time() print("Start time:", test_begin) - for compress_type in COMPRESS_TYPES: print(f"-----------------COMPRESS TYPE {compress_type}--------------------") - for compress_level in range(INITIAL_COMPRESS_LEVEL, MAX_COMPRESS_LEVEL + 1): + for compress_level in COMPRESS_LEVELS: min_time_ns = float("inf") max_time_ns = 0 total_time_ns = 0 @@ -118,8 +126,9 @@ def run(): min_compression_ratio = float("inf") max_compression_ratio = 0 total_compression_ratio = 0 - current_result = \ - results[f"compress_type_{compress_type}"][f"compress_level_{compress_level}"] + current_result = results[f"compress_type_{compress_type}"][ + f"compress_level_{compress_level}" + ] for frame_index in range(FRAME_COUNT): img = Image.open(pathlib.Path(INPUT_PATH, f"{frame_index}.png")) buffer = io.BytesIO() @@ -139,9 +148,10 @@ def run(): # Save singular test results time_ns = end - start size_B = buffer.getbuffer().nbytes - compression_ratio = 100 * size_B / os.path.getsize( + original_size_B = os.path.getsize( pathlib.Path(INPUT_PATH, f"{frame_index}.png"), ) + compression_ratio = 100 * size_B / original_size_B min_time_ns, max_time_ns = update_min_max(min_time_ns, max_time_ns, time_ns) min_size_B, max_size_B = update_min_max(min_size_B, max_size_B, size_B) @@ -157,7 +167,7 @@ def run(): test_result = { "time_ns": time_ns, "size_B": size_B, - "size_ratio_compressed_to_original_%": compression_ratio + "size_ratio_compressed_to_original_%": compression_ratio, } current_result[FRAME_DATA].append(test_result) @@ -179,8 +189,9 @@ def run(): current_result[AVG_SIZE_B] = total_size_B / FRAME_COUNT current_result[MIN_SIZE_RATIO_COMPRESSED_TO_ORIGINAL] = min_compression_ratio current_result[MAX_SIZE_RATIO_COMPRESSED_TO_ORIGINAL] = max_compression_ratio - current_result[AVG_SIZE_RATIO_COMPRESSED_TO_ORIGINAL] = \ + current_result[AVG_SIZE_RATIO_COMPRESSED_TO_ORIGINAL] = ( total_compression_ratio / FRAME_COUNT + ) print(f"Compress level {compress_level} complete") print("") @@ -188,16 +199,17 @@ def run(): print("") # Saving full results - with open(pathlib.Path(OUTPUT_PATH, "results.json"), 'w', encoding="utf-8") as file: + with open(pathlib.Path(OUTPUT_PATH, "results.json"), "w", encoding="utf-8") as file: file.write(json.dumps(results, indent=2)) # Saving shortcut results without frame data (for more human readability) - with open(pathlib.Path(OUTPUT_PATH, "summary.csv"), 'w', encoding="utf-8") as file: + with open(pathlib.Path(OUTPUT_PATH, "summary.csv"), "w", encoding="utf-8") as file: file.write(HEADER_LINE) for compress_type in COMPRESS_TYPES: - for compress_level in range(INITIAL_COMPRESS_LEVEL, MAX_COMPRESS_LEVEL + 1): - current_result = \ - results[f"compress_type_{compress_type}"][f"compress_level_{compress_level}"] + for compress_level in COMPRESS_LEVELS: + current_result = results[f"compress_type_{compress_type}"][ + f"compress_level_{compress_level}" + ] line_stats = [ str(compress_type), str(compress_level), @@ -224,5 +236,12 @@ def run(): "secs", ) + return 0 + + if __name__ == "__main__": - run() + result_main = main() + if result_main < 0: + print(f"ERROR: Status code: {result_main}") + + 
print("Done!") diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..9afd617 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,121 @@ +[tool.pylint.main] +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, it +# can't be used as an escape character. +ignore-paths = [ + # From .gitignore + # IDEs + ".idea/", + ".vscode/", + + # Python + "__pycache__/", + "venv/", + + # Logging + "logs/", + + # Outside of .gitignore + # Submodules + "modules/common/", +] + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs = 0 + +# Minimum Python version to use for version dependent checks. Will default to the +# version used to run pylint. +py-version = "3.8" + +# Discover python modules and packages in the file system subtree. +recursive = true + +# [tool.pylint.basic] +# Good variable names which should always be accepted, separated by a comma. +good-names = [ + "i", + "j", + "k", + "ex", + "Run", + "_", + + # Return of main() + "result_main", + # Suffix bytes (B) + "min_size_B", + "max_size_B", + "total_size_B", + "size_B", + "original_size_B", + "min_size_B", + "total_size_B", +] + +[tool.pylint."messages control"] +# Disable the message, report, category or checker with the given id(s). You can +# either give multiple identifiers separated by comma (,) or put this option +# multiple times (only on the command line, not in the configuration file where +# it should appear only once). You can also use "--disable=all" to disable +# everything first and then re-enable specific checks. For example, if you want +# to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable = [ + "raw-checker-failed", + "bad-inline-option", + "locally-disabled", + "file-ignored", + "suppressed-message", + "useless-suppression", + "deprecated-pragma", + "use-symbolic-message-instead", + "use-implicit-booleaness-not-comparison-to-string", + "use-implicit-booleaness-not-comparison-to-zero", + # WARG + # Ignore TODOs + "fixme", + # Pylint cannot find modules + "import-error", + # Covered by Black formatter + "line-too-long", + # Pylint cannot handle 3rd party imports + "no-member", + # Some classes are simple + "too-few-public-methods", + # Function signatures + "too-many-arguments", + # Don't care + "too-many-branches", + # Line count in file + "too-many-lines", + # Don't care + "too-many-locals", + # Don't care + "too-many-statements", + # Don't care + "too-many-return-statements", + # TODO: Duplicate code + "duplicate-code", +] + +[tool.pylint.similarities] +# Minimum lines number of a similarity. 
+# Main guard
+min-similarity-lines = 10
+
+[tool.pytest.ini_options]
+minversion = "6.0"
+# Submodules
+addopts = "--ignore=modules/common/"
+
+[tool.black]
+line-length = 100
+target-version = ["py38"]
+# Excludes files or directories in addition to the defaults
+# Submodules
+extend-exclude = "modules/common/*"
diff --git a/requirements.txt b/requirements.txt
index f30d19b..94cbf22 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,3 +2,8 @@
 opencv-python
 Pillow
 pillow-heif
+
+# Linters and formatters are explicitly versioned
+black==24.2.0
+flake8-annotations==3.0.1
+pylint==3.0.3
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..6a4f4bf
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,23 @@
+[flake8]
+# For annotations only
+select=ANN
+# Disable annotations for `self` and `cls`
+# https://github.com/sco1/flake8-annotations
+ignore=ANN101,ANN102
+# File exclusion
+extend-exclude=
+    # From .gitignore
+    # IDEs
+    .idea/,
+    .vscode/,
+
+    # Python
+    __pycache__/,
+    venv/,
+
+    # Logging
+    logs/,
+
+    # Outside of .gitignore
+    # Submodules
+    modules/common/,
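diff --git a/test_update_min_max.py b/test_update_min_max.py
new file mode 100644
--- /dev/null
+++ b/test_update_min_max.py
@@ -0,0 +1,26 @@
+"""
+Editor's sketch, not part of the original change: a minimal PyTest unit test
+for update_min_max, so the commented-out "Run unit tests" step in
+run-tests.yml has something to run once enabled. The file name and the
+import from avif_benchmark are assumptions; pytest itself is not yet pinned
+in requirements.txt.
+"""
+
+from avif_benchmark import update_min_max
+
+
+def test_update_min_max_replaces_both_bounds() -> None:
+    """
+    A value below the current minimum and above the current maximum
+    should replace both, matching the initial (inf, 0) seeding in main().
+    """
+    min_value, max_value = update_min_max(float("inf"), 0, 42)
+    assert min_value == 42
+    assert max_value == 42
+
+
+def test_update_min_max_keeps_existing_bounds() -> None:
+    """
+    A value inside the existing range should leave both bounds unchanged.
+    """
+    min_value, max_value = update_min_max(10, 50, 30)
+    assert min_value == 10
+    assert max_value == 50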