diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml deleted file mode 100644 index 647ba35..0000000 --- a/.github/workflows/coverage.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: CodeCov - -on: - # Trigger the workflow on push or pull request, - # but only for the main branch - workflow_dispatch: - push: - branches: - - master - pull_request: - branches: - - master - -jobs: - run: - name: CodeCov - runs-on: ubuntu-latest - - steps: - - name: Check out Git repository - uses: actions/checkout@v2 - - - name: Set up Python - uses: actions/setup-python@master - with: - python-version: 3.8 - - - name: Generate Report - run: | - pip install coverage - cd continuousprint && coverage run -m unittest discover -p "*_test.py" - - - name: Upload to Codecov - uses: codecov/codecov-action@v2 diff --git a/.travis.yml b/.travis.yml index 9b72eda..144f2f6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,9 +9,12 @@ jobs: install: - pip install setuptools==60.9.0 # https://github.com/pypa/setuptools/issues/3293 - pip install OctoPrint # Need OctoPrint to satisfy req's of `__init__.py` + - pip install coverage coveralls - pip install -r requirements.txt script: - - python3 -m unittest discover -p "*_test.py" + - coverage run -m unittest discover -p "*_test.py" + after_success: + - coveralls notifications: email: - smartin015@gmail.com diff --git a/README.md b/README.md index f6ebeb3..c1f704d 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # Continuous Print Queue for Octoprint ![build status](https://img.shields.io/travis/smartin015/continuousprint/master?style=plastic) -![code coverage](https://img.shields.io/codecov/c/github/smartin015/continuousprint/master) +[![Coverage Status](https://coveralls.io/repos/github/smartin015/continuousprint/badge.svg?branch=master)](https://coveralls.io/github/smartin015/continuousprint?branch=master) This plugin automates your 3D printing! 
diff --git a/continuousprint/__init__.py b/continuousprint/__init__.py index 03b63f8..1932494 100644 --- a/continuousprint/__init__.py +++ b/continuousprint/__init__.py @@ -59,7 +59,8 @@ def on_startup(self, host=None, port=None): self._logger, self._identifier, self._basefolder, - self._event_bus.fire, + # Events are of type CustomEvents and need to be unpacked:w + lambda e: self._event_bus.fire(e.event), ) def on_after_startup(self): @@ -96,10 +97,17 @@ def get_settings_defaults(self): # ---------------------- Begin TemplatePlugin ------------------- def get_template_vars(self): + try: + local_ip = self._plugin.get_local_addr().split(":")[0] + except Exception: + # Local IP details are used for display only + local_ip = "" return dict( + exceptions=self._plugin.get_exceptions(), printer_profiles=list(PRINTER_PROFILES.values()), gcode_scripts=list(GCODE_SCRIPTS.values()), - local_ip=self._plugin.get_local_ip(), + custom_events=[e.as_dict() for e in CustomEvents], + local_ip=local_ip, ) def get_template_configs(self): diff --git a/continuousprint/api.py b/continuousprint/api.py index 055d284..9e0b13b 100644 --- a/continuousprint/api.py +++ b/continuousprint/api.py @@ -12,6 +12,11 @@ class Permission(Enum): + GETSTATE = ( + "Get state", + "Allows for fetching queue and management state of Continuous Print", + False, + ) STARTSTOP = ( "Start and Stop Queue", "Allows for starting and stopping the queue", @@ -31,17 +36,31 @@ class Permission(Enum): "Allows for fetching history of print runs by Continuous Print", False, ) - CLEARHISTORY = ( - "Clear history", + RESETHISTORY = ( + "Reset history", "Allows for deleting all continuous print history data", True, ) - GETQUEUES = ("Get queue", "Allows for fetching metadata on all print queues", False) + GETQUEUES = ( + "Get queues", + "Allows for fetching metadata on all print queues", + False, + ) EDITQUEUES = ( "Edit queues", "Allows for adding/removing queues and rearranging them", True, ) + GETAUTOMATION = ( + "Get 
automation scripts and events", + "Allows for fetching metadata on all scripts and the events they're configured for", + False, + ) + EDITAUTOMATION = ( + "Edit automation scripts and events", + "Allows for adding/removing gcode scripts and registering them to execute when events happen", + True, + ) def __init__(self, longname, desc, dangerous): self.longname = longname @@ -112,6 +131,10 @@ def _msg(self, data): def _preprocess_set(self, data): pass # Used to auto-fill underspecified sets, e.g. add profile based on gcode analysis + @abstractmethod + def _set_external_symbols(self, data): + pass + def popup(self, msg, type="popup"): return self._msg(dict(type=type, msg=msg)) @@ -133,6 +156,7 @@ def _sync_history(self): # (e.g. 1.4.1 -> 2.0.0) @octoprint.plugin.BlueprintPlugin.route("/state/get", methods=["GET"]) @restricted_access + @cpq_permission(Permission.GETSTATE) def get_state(self): return self._state_json() @@ -143,9 +167,11 @@ def get_state(self): @restricted_access @cpq_permission(Permission.STARTSTOP) def set_active(self): - self._update( - DA.ACTIVATE if flask.request.form["active"] == "true" else DA.DEACTIVATE - ) + active = flask.request.form["active"] + if type(active) == str: + active = active.lower().strip() == "true" + + self._update(DA.ACTIVATE if active else DA.DEACTIVATE) return self._state_json() # Public method - adds a new set to an existing job, or creates a new job and adds the set there. @@ -193,8 +219,6 @@ def mv_job(self): new_id = dq.import_job_from_view(sq.get_job_view(src_id)) except ValidationError as e: return json.dumps(dict(error=str(e))) - - print("Imported job from view") sq.remove_jobs([src_id]) src_id = new_id @@ -246,7 +270,7 @@ def export_job(self): # PRIVATE API METHOD - may change without warning. 
@octoprint.plugin.BlueprintPlugin.route("/job/rm", methods=["POST"]) @restricted_access - @cpq_permission(Permission.EDITJOB) + @cpq_permission(Permission.RMJOB) def rm_job(self): return json.dumps( self._get_queue(flask.request.form["queue"]).remove_jobs( @@ -275,7 +299,7 @@ def get_history(self): # PRIVATE API METHOD - may change without warning. @octoprint.plugin.BlueprintPlugin.route("/history/reset", methods=["POST"]) @restricted_access - @cpq_permission(Permission.CLEARHISTORY) + @cpq_permission(Permission.RESETHISTORY) def reset_history(self): queries.resetHistory() return json.dumps("OK") @@ -296,3 +320,27 @@ def edit_queues(self): (absent_names, added) = queries.assignQueues(queues) self._commit_queues(added, absent_names) return json.dumps("OK") + + # PRIVATE API METHOD - may change without warning. + @octoprint.plugin.BlueprintPlugin.route("/automation/edit", methods=["POST"]) + @restricted_access + @cpq_permission(Permission.EDITAUTOMATION) + def edit_automation(self): + data = json.loads(flask.request.form.get("json")) + queries.assignAutomation(data["scripts"], data["preprocessors"], data["events"]) + return json.dumps("OK") + + # PRIVATE API METHOD - may change without warning. + @octoprint.plugin.BlueprintPlugin.route("/automation/get", methods=["GET"]) + @restricted_access + @cpq_permission(Permission.GETAUTOMATION) + def get_automation(self): + return json.dumps(queries.getAutomation()) + + # PRIVATE API METHOD - may change without warning. 
+ @octoprint.plugin.BlueprintPlugin.route("/automation/external", methods=["POST"]) + @restricted_access + @cpq_permission(Permission.EDITAUTOMATION) + def set_automation_external_symbols(self): + self._set_external_symbols(flask.request.get_json()) + return json.dumps("OK") diff --git a/continuousprint/api_test.py b/continuousprint/api_test.py new file mode 100644 index 0000000..a7c37a7 --- /dev/null +++ b/continuousprint/api_test.py @@ -0,0 +1,303 @@ +import unittest +import json +import logging +from .driver import Action as DA +from unittest.mock import patch, MagicMock, call +import imp +from flask import Flask +from .api import Permission, cpq_permission +import continuousprint.api + + +class TestPermission(unittest.TestCase): + def test_as_dict(self): + d = Permission.ADDJOB.as_dict() + self.assertEqual(d["key"], "ADDJOB") + + @patch("continuousprint.api.Permissions") + def test_wrap_permission_ok(self, perms): + func = MagicMock(__name__="func") + wrapped = cpq_permission(Permission.ADDSET)(func) + + perms.PLUGIN_CONTINUOUSPRINT_ADDSET.can.return_value = True + wrapped() + func.assert_called_once() + + @patch("continuousprint.api.flask") + @patch("continuousprint.api.Permissions") + def test_wrap_permission_err(self, perms, flask): + func = MagicMock(__name__="func") + flask.make_response.return_value = "retval" + + wrapped = cpq_permission(Permission.ADDSET)(func) + perms.PLUGIN_CONTINUOUSPRINT_ADDSET.can.return_value = False + got = wrapped() + self.assertEqual(got, "retval") + func.assert_not_called() + + +class TestAPI(unittest.TestCase): + def setUp(self): # , plugin, restrict): + # Because handlers are decorated @restricted_access which + # expects octoprint to be initialized, we have to patch the + # decorator and reload the module so it isn't dependent on + # octoprint internal state. 
+ def kill_patches(): + patch.stopall() + imp.reload(continuousprint.api) + + self.addCleanup(kill_patches) + patch( + "continuousprint.api.octoprint.server.util.flask.restricted_access", + lambda x: x, + ).start() + + imp.reload(continuousprint.api) + self.perm = patch("continuousprint.api.Permissions").start() + patch.object( + continuousprint.api.ContinuousPrintAPI, "__abstractmethods__", set() + ).start() + + self.app = Flask(__name__) + self.api = continuousprint.api.ContinuousPrintAPI() + self.api._basefolder = "notexisty" + self.api._identifier = "continuousprint" + self.api._get_queue = MagicMock() + self.api._logger = logging.getLogger() + self.app.register_blueprint(self.api.get_blueprint()) + self.app.config.update({"TESTING": True}) + self.client = self.app.test_client() + + def test_role_access_denied(self): + testcases = [ + ("GETSTATE", "/state/get"), + ("STARTSTOP", "/set_active"), + ("ADDSET", "/set/add"), + ("ADDJOB", "/job/add"), + ("EDITJOB", "/job/mv"), + ("EDITJOB", "/job/edit"), + ("ADDJOB", "/job/import"), + ("EXPORTJOB", "/job/export"), + ("RMJOB", "/job/rm"), + ("EDITJOB", "/job/reset"), + ("GETHISTORY", "/history/get"), + ("RESETHISTORY", "/history/reset"), + ("GETQUEUES", "/queues/get"), + ("EDITQUEUES", "/queues/edit"), + ("GETAUTOMATION", "/automation/get"), + ("EDITAUTOMATION", "/automation/edit"), + ("EDITAUTOMATION", "/automation/external"), + ] + self.api._get_queue = None # MagicMock interferes with checking + + num_handlers_tested = len(set([tc[1] for tc in testcases])) + handlers = [ + f + for f in dir(self.api) + if hasattr(getattr(self.api, f), "_blueprint_rules") + ] + self.assertEqual(num_handlers_tested, len(handlers)) + + num_perms_tested = len(set([tc[0] for tc in testcases])) + num_perms = len([p for p in Permission]) + self.assertEqual(num_perms_tested, num_perms) + + for (role, endpoint) in testcases: + p = getattr(self.perm, f"PLUGIN_CONTINUOUSPRINT_{role}") + p.can.return_value = False + if role.startswith("GET"): + 
rep = self.client.get(endpoint) + else: + rep = self.client.post(endpoint) + self.assertEqual(rep.status_code, 403) + + def test_get_state(self): + self.perm.PLUGIN_CONTINUOUSPRINT_GETSTATE.can.return_value = True + self.api._state_json = lambda: "foo" + rep = self.client.get("/state/get") + self.assertEqual(rep.status_code, 200) + self.assertEqual(rep.data, b"foo") + + def test_set_active(self): + self.perm.PLUGIN_CONTINUOUSPRINT_STARTSTOP.can.return_value = True + self.api._update = MagicMock() + self.api._state_json = lambda: "foo" + rep = self.client.post("/set_active", data=dict(active="true")) + self.assertEqual(rep.status_code, 200) + self.api._update.assert_called_with(DA.ACTIVATE) + + self.api._update.reset_mock() + rep = self.client.post("/set_active", data=dict(active=True)) + self.assertEqual(rep.status_code, 200) + self.api._update.assert_called_with(DA.ACTIVATE) + + self.api._update.reset_mock() + rep = self.client.post("/set_active", data=dict(active=False)) + self.assertEqual(rep.status_code, 200) + self.api._update.assert_called_with(DA.DEACTIVATE) + + self.api._update.reset_mock() + rep = self.client.post("/set_active", data=dict(active="whatever")) + self.assertEqual(rep.status_code, 200) + self.api._update.assert_called_with(DA.DEACTIVATE) + + def test_add_set(self): + self.perm.PLUGIN_CONTINUOUSPRINT_ADDSET.can.return_value = True + data = dict(foo="bar", job="jid") + self.api._get_queue().add_set.return_value = "ret" + self.api._preprocess_set = lambda s: s + + rep = self.client.post("/set/add", data=dict(json=json.dumps(data))) + + self.assertEqual(rep.status_code, 200) + self.assertEqual(rep.get_data(as_text=True), '"ret"') + self.api._get_queue().add_set.assert_called_with("jid", data) + + def test_add_job(self): + self.perm.PLUGIN_CONTINUOUSPRINT_ADDJOB.can.return_value = True + data = dict(name="jobname") + self.api._get_queue().add_job().as_dict.return_value = "ret" + + rep = self.client.post("/job/add", data=dict(json=json.dumps(data))) 
+ + self.assertEqual(rep.status_code, 200) + self.assertEqual(rep.get_data(as_text=True), '"ret"') + self.api._get_queue().add_job.assert_called_with("jobname") + + def test_mv_job(self): + self.perm.PLUGIN_CONTINUOUSPRINT_EDITJOB.can.return_value = True + data = dict(id="foo", after_id="bar", src_queue="q1", dest_queue="q2") + + rep = self.client.post("/job/mv", data=data) + + self.assertEqual(rep.status_code, 200) + self.api._get_queue().mv_job.assert_called_with(data["id"], data["after_id"]) + + def test_edit_job(self): + self.perm.PLUGIN_CONTINUOUSPRINT_EDITJOB.can.return_value = True + data = dict(id="foo", queue="queue") + self.api._get_queue().edit_job.return_value = "ret" + rep = self.client.post("/job/edit", data=dict(json=json.dumps(data))) + + self.assertEqual(rep.status_code, 200) + self.assertEqual(rep.get_data(as_text=True), '"ret"') + self.api._get_queue().edit_job.assert_called_with(data["id"], data) + + def test_import_job(self): + self.perm.PLUGIN_CONTINUOUSPRINT_ADDJOB.can.return_value = True + data = dict(path="path", queue="queue") + self.api._get_queue().import_job().as_dict.return_value = "ret" + rep = self.client.post("/job/import", data=data) + + self.assertEqual(rep.status_code, 200) + self.assertEqual(rep.get_data(as_text=True), '"ret"') + self.api._get_queue().import_job.assert_called_with(data["path"]) + + def test_export_job(self): + self.perm.PLUGIN_CONTINUOUSPRINT_EXPORTJOB.can.return_value = True + data = {"job_ids[]": ["1", "2", "3"]} + self.api._get_queue().export_job.return_value = "ret" + self.api._path_in_storage = lambda p: p + self.api._path_on_disk = lambda p, sd: p + rep = self.client.post("/job/export", data=data) + + self.assertEqual(rep.status_code, 200) + self.assertEqual( + json.loads(rep.get_data(as_text=True)), + dict(errors=[], paths=["ret", "ret", "ret"]), + ) + self.api._get_queue().export_job.assert_has_calls( + [call(int(i), "/") for i in data["job_ids[]"]] + ) + + def test_rm_job(self): + 
self.perm.PLUGIN_CONTINUOUSPRINT_RMJOB.can.return_value = True + data = {"queue": "q", "job_ids[]": ["1", "2", "3"]} + self.api._get_queue().remove_jobs.return_value = "ret" + + rep = self.client.post("/job/rm", data=data) + + self.assertEqual(rep.status_code, 200) + self.assertEqual(rep.get_data(as_text=True), '"ret"') + self.api._get_queue().remove_jobs.assert_called_with(data["job_ids[]"]) + + def test_reset_multi(self): + self.perm.PLUGIN_CONTINUOUSPRINT_EDITJOB.can.return_value = True + data = {"queue": "q", "job_ids[]": ["1", "2", "3"]} + self.api._get_queue().reset_jobs.return_value = "ret" + + rep = self.client.post("/job/reset", data=data) + + self.assertEqual(rep.status_code, 200) + self.assertEqual(rep.get_data(as_text=True), '"ret"') + self.api._get_queue().reset_jobs.assert_called_with(data["job_ids[]"]) + + def test_get_history(self): + self.perm.PLUGIN_CONTINUOUSPRINT_GETHISTORY.can.return_value = True + self.api._history_json = lambda: "foo" + rep = self.client.get("/history/get") + self.assertEqual(rep.status_code, 200) + self.assertEqual(rep.data, b"foo") + + @patch("continuousprint.api.queries") + def test_reset_history(self, q): + self.perm.PLUGIN_CONTINUOUSPRINT_RESETHISTORY.can.return_value = True + rep = self.client.post("/history/reset") + q.resetHistory.assert_called_once() + self.assertEqual(rep.status_code, 200) + self.assertEqual(rep.data, b'"OK"') + + @patch("continuousprint.api.queries") + def test_get_queues(self, q): + self.perm.PLUGIN_CONTINUOUSPRINT_GETQUEUES.can.return_value = True + mq = MagicMock() + mq.as_dict.return_value = dict(foo="bar") + q.getQueues.return_value = [mq] + rep = self.client.get("/queues/get") + self.assertEqual(rep.status_code, 200) + self.assertEqual(json.loads(rep.get_data(as_text=True)), [dict(foo="bar")]) + + @patch("continuousprint.api.queries") + def test_edit_queues(self, q): + self.perm.PLUGIN_CONTINUOUSPRINT_EDITQUEUES.can.return_value = True + q.assignQueues.return_value = ("absent", "added") + 
self.api._commit_queues = MagicMock() + rep = self.client.post("/queues/edit", data=dict(json='"foo"')) + self.assertEqual(rep.status_code, 200) + self.assertEqual(rep.data, b'"OK"') + self.api._commit_queues.assert_called_with("added", "absent") + + @patch("continuousprint.api.queries") + def test_edit_automation(self, q): + self.perm.PLUGIN_CONTINUOUSPRINT_EDITAUTOMATION.can.return_value = True + rep = self.client.post( + "/automation/edit", + data=dict( + json=json.dumps( + dict( + scripts="scripts", + events="events", + preprocessors="preprocessors", + ) + ) + ), + ) + self.assertEqual(rep.status_code, 200) + self.assertEqual(rep.data, b'"OK"') + q.assignAutomation.assert_called_with("scripts", "preprocessors", "events") + + @patch("continuousprint.api.queries") + def test_get_automation(self, q): + self.perm.PLUGIN_CONTINUOUSPRINT_GETAUTOMATION.can.return_value = True + q.getAutomation.return_value = "foo" + rep = self.client.get("/automation/get") + self.assertEqual(rep.status_code, 200) + self.assertEqual(rep.data, b'"foo"') + + @patch("continuousprint.api.queries") + def test_automation_external(self, q): + self.perm.PLUGIN_CONTINUOUSPRINT_EDITAUTOMATION.can.return_value = True + self.api._set_external_symbols = MagicMock() + rep = self.client.post("/automation/external", json=dict(foo="bar")) + self.assertEqual(rep.status_code, 200) + self.api._set_external_symbols.assert_called_with(dict(foo="bar")) diff --git a/continuousprint/data/__init__.py b/continuousprint/data/__init__.py index b468e11..99d9cf2 100644 --- a/continuousprint/data/__init__.py +++ b/continuousprint/data/__init__.py @@ -11,29 +11,78 @@ with open(os.path.join(base, "gcode_scripts.yaml"), "r") as f: GCODE_SCRIPTS = dict((d["name"], d) for d in yaml.safe_load(f.read())["GScript"]) +with open(os.path.join(base, "preprocessors.yaml"), "r") as f: + PREPROCESSORS = dict( + (d["name"], d) for d in yaml.safe_load(f.read())["Preprocessors"] + ) + class CustomEvents(Enum): - START_PRINT = 
"continuousprint_start_print" - COOLDOWN = "continuousprint_cooldown" - CLEAR_BED = "continuousprint_clear_bed" - FINISH = "continuousprint_finish" - CANCEL = "continuousprint_cancel" + ACTIVATE = ( + "continuousprint_activate", + "Queue Activated", + "Fires when the queue is started, e.g. via the 'Start Managing' button.", + ) + PRINT_START = ( + "continuousprint_start_print", + "Print Start", + "Fires when a new print is starting from the queue. Unlike OctoPrint events, this does not fire when event scripts are executed.", + ) + PRINT_SUCCESS = ( + "continuousprint_success", + "Print Success", + "Fires when the active print finishes. This will also fire for prints running before the queue was started. The final print will fire QUEUE_FINISH instead of PRINT_SUCCESS.", + ) + PRINT_CANCEL = ( + "continuousprint_cancel", + "Print Cancel", + "Fires when automation or the user has cancelled the active print.", + ) + COOLDOWN = ( + "continuousprint_cooldown", + "Bed Cooldown", + "Fires when bed cooldown is starting. Bed Cooldown is disabled by default - see the settings below.", + ) + FINISH = ( + "continuousprint_finish", + "Queue Finished", + "Fires when there is no work left to do and the plugin goes idle.", + ) + AWAITING_MATERIAL = ( + "continuousprint_awaiting_material", + "Awaiting Material", + "Fires once when the current job requires a different material than what is currently loaded. This requires SpoolManager to be installed (see Integrations).", + ) + DEACTIVATE = ( + "continuousprint_deactivate", + "Queue Deactivated", + "Fires when the queue is no longer actively managed. 
This script may be skipped if another print is underway when the queue goes inactive.", + ) + + def __init__(self, event, displayName, desc): + self.event = event + self.displayName = displayName + self.desc = desc + + def as_dict(self): + return dict(event=self.event, display=self.displayName, desc=self.desc) class Keys(Enum): - # TODO migrate old setting names to enum names - QUEUE = ("cp_queue", None) + + BED_COOLDOWN_SCRIPT_DEPRECATED = ( + "cp_bed_cooldown_script", + "; Put script to run before bed cools here\n", + ) + FINISHED_SCRIPT_DEPRECATED = ("cp_queue_finished_script", "Generic Off") + CLEARING_SCRIPT_DEPRECATED = ("cp_bed_clearing_script", "Pause") + QUEUE_DEPRECATED = ("cp_queue", None) + PRINTER_PROFILE = ("cp_printer_profile", "Generic") - CLEARING_SCRIPT = ("cp_bed_clearing_script", "Pause") - FINISHED_SCRIPT = ("cp_queue_finished_script", "Generic Off") RESTART_MAX_RETRIES = ("cp_restart_on_pause_max_restarts", 3) RESTART_ON_PAUSE = ("cp_restart_on_pause_enabled", False) RESTART_MAX_TIME = ("cp_restart_on_pause_max_seconds", 60 * 60) BED_COOLDOWN_ENABLED = ("bed_cooldown_enabled", False) - BED_COOLDOWN_SCRIPT = ( - "cp_bed_cooldown_script", - "; Put script to run before bed cools here\n", - ) BED_COOLDOWN_THRESHOLD = ("bed_cooldown_threshold", 30) BED_COOLDOWN_TIMEOUT = ("bed_cooldown_timeout", 60) MATERIAL_SELECTION = ("cp_material_selection_enabled", False) @@ -58,13 +107,7 @@ def __init__(self, setting, default): PRINT_FILE_DIR = "ContinuousPrint" -TEMP_FILES = dict( - [ - (k.setting, f"{PRINT_FILE_DIR}/{k.setting}.gcode") - for k in [Keys.FINISHED_SCRIPT, Keys.CLEARING_SCRIPT, Keys.BED_COOLDOWN_SCRIPT] - ] -) - +TEMP_FILE_DIR = PRINT_FILE_DIR + "/tmp" ASSETS = dict( js=[ "js/cp_modified_sortable.js", diff --git a/continuousprint/data/data_test.py b/continuousprint/data/data_test.py index 280722a..22e03eb 100644 --- a/continuousprint/data/data_test.py +++ b/continuousprint/data/data_test.py @@ -1,5 +1,7 @@ import unittest -from ..data import 
GCODE_SCRIPTS, PRINTER_PROFILES +from io import StringIO +from ..data import GCODE_SCRIPTS, PRINTER_PROFILES, PREPROCESSORS +from asteval import Interpreter class TestGCODEScripts(unittest.TestCase): @@ -42,3 +44,132 @@ def test_referential_integrity(self): for s in ["clearBed", "finished"]: with self.subTest(profile=k, script=s): self.assertNotEqual(GCODE_SCRIPTS.get(v["defaults"][s]), None) + + +def test_preprocessor(name): + def preprocessor_decorator(func): + def testcase(self): + def runInterp(symtable): + stdout = StringIO() + interp = Interpreter(writer=stdout) + for (k, v) in symtable.items(): + interp.symtable[k] = v + return interp(PREPROCESSORS[name]["body"], raise_errors=True), stdout + + return func(self, runInterp) + + testcase._preprocessor_name = name + return testcase + + return preprocessor_decorator + + +class TestPreprocessors(unittest.TestCase): + def test_all_preprocessors_tested(self): + tested = set() + for k in dir(TestPreprocessors): + v = getattr(TestPreprocessors, k) + if hasattr(v, "_preprocessor_name"): + tested.add(v._preprocessor_name) + self.assertEqual(tested, set([p["name"] for p in PREPROCESSORS.values()])) + + def test_has_all_fields(self): + for k, v in PREPROCESSORS.items(): + with self.subTest(preprocessor=k): + self.assertEqual( + sorted(v.keys()), + sorted(["body", "name"]), + ) + + @test_preprocessor("If the bed temperature is >40C") + def test_bed_temp(self, pp): + self.assertEqual(pp(dict(current=dict(bed_temp=40)))[0], False) + self.assertEqual(pp(dict(current=dict(bed_temp=41)))[0], True) + + @test_preprocessor('If print filename ends in "_special.gcode"') + def test_filename_special(self, pp): + self.assertEqual(pp(dict(current=dict(path="foo.gcode")))[0], False) + self.assertEqual(pp(dict(current=dict(path="foo_special.gcode")))[0], True) + + @test_preprocessor("If print will be at least 10mm high") + def test_print_height(self, pp): + self.assertEqual( + 
pp(dict(metadata=dict(analysis=dict(dimensions=dict(height=9)))))[0], False + ) + self.assertEqual( + pp(dict(metadata=dict(analysis=dict(dimensions=dict(height=10)))))[0], True + ) + + @test_preprocessor("If print takes on average over an hour to complete") + def test_avg_print_time(self, pp): + self.assertEqual( + pp( + dict( + metadata=dict(statistics=dict(averagePrintTime=dict(_default=500))) + ) + )[0], + False, + ) + self.assertEqual( + pp( + dict( + metadata=dict( + statistics=dict(averagePrintTime=dict(_default=60 * 60 + 1)) + ) + ) + )[0], + True, + ) + + @test_preprocessor("If print has failed more than 10% of the time") + def test_failure_rate(self, pp): + history = [dict(success=False)] + for i in range(10): + history.append(dict(success=True)) + self.assertEqual(pp(dict(metadata=dict(history=history)))[0], False) + history.append(dict(success=False)) + self.assertEqual(pp(dict(metadata=dict(history=history)))[0], True) + + @test_preprocessor("Also notify of bed temperature") + def test_notify(self, pp): + result, stdout = pp(dict(current=dict(bed_temp=1))) + stdout.seek(0) + self.assertEqual(stdout.read(), "Preprocessor says the bed temperature is 1\n") + + @test_preprocessor("Error and pause if bed is >60C") + def test_error(self, pp): + self.assertEqual(pp(dict(current=dict(bed_temp=1)))[0], True) + with self.assertRaisesRegex(Exception, "600C"): + pp(dict(current=dict(bed_temp=600))) + + @test_preprocessor("If starting from idle (first run, or ran finished script)") + def test_from_idle(self, pp): + self.assertEqual( + pp( + dict( + current=dict(state="clearing"), + ) + )[0], + False, + ) + self.assertEqual( + pp( + dict( + current=dict(state="inactive"), + ) + )[0], + True, + ) + self.assertEqual( + pp( + dict( + current=dict(state="idle"), + ) + )[0], + True, + ) + + @test_preprocessor("If externally set variable is True") + def test_extern(self, pp): + self.assertEqual(pp(dict(external=dict(testval=True)))[0], True) + 
self.assertEqual(pp(dict(external=dict(testval=False)))[0], False) diff --git a/continuousprint/data/preprocessors.yaml b/continuousprint/data/preprocessors.yaml new file mode 100644 index 0000000..8aee3cb --- /dev/null +++ b/continuousprint/data/preprocessors.yaml @@ -0,0 +1,37 @@ +# Remember to add tests in data_test.py (see TestPreprocessors class) +Preprocessors: + - name: "If the bed temperature is >40C" + body: | + current['bed_temp'] > 40 + - name: "If print filename ends in \"_special.gcode\"" + body: | + current['path'].endswith("_special.gcode") + - name: "If print will be at least 10mm high" + body: | + metadata["analysis"]["dimensions"]["height"] >= 10 + - name: "If print takes on average over an hour to complete" + body: | + metadata["statistics"]["averagePrintTime"]["_default"] > 60*60 + - name: "If print has failed more than 10% of the time" + body: | + # Div by 1 when history is empty to prevent divide by zero + failure_ratio = len([h for h in metadata["history"] if not h['success']]) / max(1, len(metadata["history"])) + False if len(metadata["history"]) == 0 else failure_ratio > 0.1 + - name: "Also notify of bed temperature" + body: | + print("Preprocessor says the bed temperature is", current['bed_temp']) + True + - name: "Error and pause if bed is >60C" + body: | + if current['bed_temp'] > 60: + raise Exception("Bed temp " + str(current['bed_temp']) + "C) is too hot for this operation") + True + - name: "If starting from idle (first run, or ran finished script)" + body: | + # Both current and previous printer state are checked here to ensure printer is properly idle + current["state"] in ("inactive", "idle") + - name: "If externally set variable is True" + body: | + # Set external values via web request, e.g. 
+ # curl -X POST -d '{"testval": true}' http://printer:5000/plugin/continuousprint/automation/external + external["testval"] == True diff --git a/continuousprint/driver.py b/continuousprint/driver.py index fa0a0a8..cc3d1c9 100644 --- a/continuousprint/driver.py +++ b/continuousprint/driver.py @@ -1,6 +1,7 @@ import time from multiprocessing import Lock from enum import Enum, auto +from .data import CustomEvents class Action(Enum): @@ -38,6 +39,7 @@ def timeAgo(elapsed): class Driver: # If the printer is idle for this long while printing, break out of the printing state (consider it a failure) PRINTING_IDLE_BREAKOUT_SEC = 15.0 + TIMELAPSE_WAIT_SEC = 30 def __init__( self, @@ -68,6 +70,8 @@ def __init__( self._update_ui = False self._cur_path = None self._cur_materials = [] + self._bed_temp = 0 + self._timelapse_start_ts = None def action( self, @@ -76,6 +80,7 @@ def action( path: str = None, materials: list = [], bed_temp=None, + timelapse_start_ts=None, ): # Given that some calls to action() come from a watchdog timer, we hold a mutex when performing the action # so the state is updated in a thread safe way. @@ -83,7 +88,7 @@ def action( now = time.time() if self.printer_state_ts + 15 > now or a != Action.TICK: self._logger.debug( - f"{a.name}, {p.name}, path={path}, materials={materials}, bed_temp={bed_temp}" + f"{a.name}, {p.name}, path={path}, materials={materials}, bed_temp={bed_temp}, timelapse_ts={timelapse_start_ts}" ) elif a == Action.TICK and not self.printer_state_logs_suppressed: self.printer_state_logs_suppressed = True @@ -102,11 +107,20 @@ def action( self._cur_materials = materials if bed_temp is not None: self._bed_temp = bed_temp + self._timelapse_start_ts = timelapse_start_ts + self._runner.set_current_symbols( + dict( + path=self._cur_path, + materials=self._cur_materials, + bed_temp=self._bed_temp, + state=self.state.__name__.strip("_state_"), + ) + ) # Deactivation must be allowed on all states, so we hande it here for # completeness. 
if a == Action.DEACTIVATE and self.state != self._state_inactive: - nxt = self._state_inactive + nxt = self._enter_inactive() else: nxt = self.state(a, p) @@ -123,11 +137,18 @@ def action( def _state_unknown(self, a: Action, p: Printer): pass + def _enter_inactive(self): + self._runner.run_script_for_event(CustomEvents.DEACTIVATE) + return self._state_inactive + def _state_inactive(self, a: Action, p: Printer): self.q.release() self.retries = 0 if a == Action.ACTIVATE: + if self._runner.run_script_for_event(CustomEvents.ACTIVATE) is not None: + return self._state_activating + if p != Printer.IDLE: return self._state_printing else: @@ -140,6 +161,11 @@ def _state_inactive(self, a: Action, p: Printer): "Inactive (active print continues unmanaged)", StatusType.NEEDS_ACTION ) + def _state_activating(self, a: Action, p: Printer): + self._set_status("Running startup script") + if a == Action.SUCCESS or self._long_idle(p): + return self._state_idle(a, p) + def _state_idle(self, a: Action, p: Printer): self.q.release() @@ -152,8 +178,17 @@ def _state_idle(self, a: Action, p: Printer): else: return self._enter_start_print(a, p) - def _enter_start_print(self, a: Action, p: Printer): - # TODO "clear bed on startup" setting + def _state_preprint(self, a: Action, p: Printer): + self._set_status("Running pre-print script") + if a == Action.SUCCESS or self._long_idle(p): + # Skip running the pre-print script this time + return self._enter_start_print(a, p, run_pre_script=False) + + def _enter_start_print(self, a: Action, p: Printer, run_pre_script=True): + if run_pre_script and self._runner.run_script_for_event( + CustomEvents.PRINT_START + ): + return self._state_preprint # Pre-call start_print on entry to eliminate tick delay self.start_failures = 0 @@ -169,6 +204,29 @@ def _fmt_material_key(self, mk): except AttributeError: return mk + def _materials_match(self, item): + for i, im in enumerate(item.materials()): + if im is None: # No constraint + continue + cur = 
self._cur_materials[i] if i < len(self._cur_materials) else None + if im != cur: + return False + return True + + def _state_awaiting_material(self, a: Action, p: Printer): + item = self.q.get_set_or_acquire() + if item is None: + self._set_status("No work to do; going idle") + return self._state_idle + + if self._materials_match(item): + return self._enter_start_print(a, p) + else: + self._set_status( + "Required material not loaded; insert the materials listed for the next item", + StatusType.NEEDS_ACTION, + ) + def _state_start_print(self, a: Action, p: Printer): if p != Printer.IDLE: self._set_status("Waiting for printer to be ready") @@ -179,17 +237,9 @@ def _state_start_print(self, a: Action, p: Printer): self._set_status("No work to do; going idle") return self._state_idle - # Block until we have the right materials loaded (if required) - for i, im in enumerate(item.materials()): - if im is None: # No constraint - continue - cur = self._cur_materials[i] if i < len(self._cur_materials) else None - if im != cur: - self._set_status( - f"Need {self._fmt_material_key(im)} in tool {i}, but {self._fmt_material_key(cur)} is loaded", - StatusType.NEEDS_ACTION, - ) - return + if not self._materials_match(item): + self._runner.run_script_for_event(CustomEvents.AWAITING_MATERIAL) + return self._state_awaiting_material self.q.begin_run() if self._runner.start_print(item): @@ -199,7 +249,7 @@ def _state_start_print(self, a: Action, p: Printer): self.start_failures += 1 if self.start_failures >= self.max_startup_attempts: self._set_status("Failed to start; too many attempts", StatusType.ERROR) - return self._state_inactive + return self._enter_inactive() else: self._set_status( f"Start attempt failed ({self.start_failures}/{self.max_startup_attempts})", @@ -259,14 +309,14 @@ def _state_paused(self, a: Action, p: Printer): if self._long_idle(p): # Here, IDLE implies the user cancelled the print.
# Go inactive to prevent stomping on manual changes - return self._state_inactive + return self._enter_inactive() elif p == Printer.BUSY: return self._state_printing def _state_spaghetti_recovery(self, a: Action, p: Printer): self._set_status("Cancelling (spaghetti early in print)", StatusType.ERROR) if p == Printer.PAUSED: - self._runner.cancel_print() + self._runner.run_script_for_event(CustomEvents.PRINT_CANCEL) return self._state_failure def _state_failure(self, a: Action, p: Printer): @@ -279,16 +329,26 @@ def _state_failure(self, a: Action, p: Printer): else: self.q.end_run("failure") self._set_status("Failure (max retries exceeded", StatusType.ERROR) - return self._state_inactive + return self._enter_inactive() def _state_success(self, a: Action, p: Printer): + # Wait for timelapse to complete; allows associating the timelapse + # in the history and prevents performance issues due to render cpu + now = time.time() + if ( + self._timelapse_start_ts is not None + and now < self._timelapse_start_ts + self.TIMELAPSE_WAIT_SEC + ): + self._set_status("Waiting for timelapse to render", StatusType.NORMAL) + return + # Complete prior queue item if that's what we just finished. # Note that end_run fails silently if there's no active run # (e.g. 
if we start managing mid-print) self.q.end_run("success") self.retries = 0 - # Clear bed if we have a next queue item, otherwise run finishing script + # Clear bed if we have a next item, otherwise run finishing script item = self.q.get_set_or_acquire() if item is not None: self._logger.debug( @@ -305,14 +365,14 @@ def _state_start_clearing(self, a: Action, p: Printer): return if self.managed_cooldown: - self._runner.start_cooldown() + self._runner.run_script_for_event(CustomEvents.COOLDOWN) self.cooldown_start = time.time() self._logger.info( f"Cooldown initiated (threshold={self.cooldown_threshold}, timeout={self.cooldown_timeout})" ) return self._state_cooldown else: - self._runner.clear_bed() + self._runner.run_script_for_event(CustomEvents.PRINT_SUCCESS) return self._state_clearing def _state_cooldown(self, a: Action, p: Printer): @@ -329,7 +389,7 @@ def _state_cooldown(self, a: Action, p: Printer): self._set_status("Cooling down") if clear: - self._runner.clear_bed() + self._runner.run_script_for_event(CustomEvents.PRINT_SUCCESS) return self._state_clearing def _state_clearing(self, a: Action, p: Printer): @@ -337,7 +397,7 @@ def _state_clearing(self, a: Action, p: Printer): return self._enter_start_print(a, p) elif a == Action.FAILURE: self._set_status("Error when clearing bed - aborting", StatusType.ERROR) - return self._state_inactive # Skip past failure state to inactive + return self._enter_inactive() # Skip past failure state to inactive if self._long_idle(p): # Idle state without event; assume success return self._enter_start_print(a, p) @@ -349,12 +409,12 @@ def _state_start_finishing(self, a: Action, p: Printer): self._set_status("Waiting for printer to be ready") return - self._runner.run_finish_script() + self._runner.run_script_for_event(CustomEvents.FINISH) return self._state_finishing def _state_finishing(self, a: Action, p: Printer): if a == Action.FAILURE: - return self._state_inactive + return self._enter_inactive() # Idle state without event 
-> assume success and go idle if a == Action.SUCCESS or self._long_idle(p): diff --git a/continuousprint/driver_test.py b/continuousprint/driver_test.py index e40aa9c..84c3b62 100644 --- a/continuousprint/driver_test.py +++ b/continuousprint/driver_test.py @@ -3,6 +3,7 @@ import time from unittest.mock import MagicMock, ANY from .driver import Driver, Action as DA, Printer as DP +from .data import CustomEvents import logging import traceback @@ -18,34 +19,74 @@ def setUp(self): ) self.d.set_retry_on_pause(True) self.d.action(DA.DEACTIVATE, DP.IDLE) + self.d._runner.run_script_for_event.reset_mock() item = MagicMock(path="asdf") # return same item by default every time self.d.q.get_set_or_acquire.return_value = item self.d.q.get_set.return_value = item + def test_activate_with_startup_script(self): + self.d._runner.run_script_for_event.side_effect = ["foo.gcode", None] + self.d.action(DA.ACTIVATE, DP.IDLE) # -> activating + self.d.q.begin_run.assert_not_called() + self.d._runner.start_print.assert_not_called() + self.assertEqual(self.d.state.__name__, self.d._state_activating.__name__) + self.d._runner.run_script_for_event.assert_called_with(CustomEvents.ACTIVATE) + # Stays in activating while script is running + self.d.action(DA.TICK, DP.BUSY) + self.assertEqual(self.d.state.__name__, self.d._state_activating.__name__) + + # Exits to start printing when script completes + self.d.action(DA.SUCCESS, DP.IDLE) + self.assertEqual(self.d.state.__name__, self.d._state_printing.__name__) + self.d._runner.start_print.assert_called() + + def test_activate_with_preprint_script(self): + # First call is ACTIVATE, second call is PRINT_START + self.d._runner.run_script_for_event.side_effect = [None, "foo.gcode", None] + self.d.action(DA.ACTIVATE, DP.IDLE) # -> preprint + self.d.q.begin_run.assert_not_called() + self.d._runner.start_print.assert_not_called() + self.assertEqual(self.d.state.__name__, self.d._state_preprint.__name__) + 
self.d._runner.run_script_for_event.assert_called_with(CustomEvents.PRINT_START) + # Stays in preprint while script is running + self.d.action(DA.TICK, DP.BUSY) + self.assertEqual(self.d.state.__name__, self.d._state_preprint.__name__) + + # Exits to start printing when script completes + self.d.action(DA.SUCCESS, DP.IDLE) + self.assertEqual(self.d.state.__name__, self.d._state_printing.__name__) + self.d._runner.start_print.assert_called() + def test_activate_not_yet_printing(self): + self.d._runner.run_script_for_event.return_value = None self.d.action(DA.ACTIVATE, DP.IDLE) # -> start_printing -> printing self.d.q.begin_run.assert_called() self.d._runner.start_print.assert_called_with(self.d.q.get_set.return_value) self.assertEqual(self.d.state.__name__, self.d._state_printing.__name__) + self.d._runner.run_script_for_event.assert_called_with(CustomEvents.PRINT_START) def test_activate_already_printing(self): + self.d._runner.run_script_for_event.return_value = None self.d.action(DA.ACTIVATE, DP.BUSY) self.d.action(DA.TICK, DP.BUSY) self.d._runner.start_print.assert_not_called() self.assertEqual(self.d.state.__name__, self.d._state_printing.__name__) + self.d._runner.run_script_for_event.assert_called_with(CustomEvents.ACTIVATE) def test_events_cause_no_action_when_inactive(self): def assert_nocalls(): - self.d._runner.run_finish_script.assert_not_called() + self.d._runner.run_script_for_event.assert_not_called() self.d._runner.start_print.assert_not_called() for p in [DP.IDLE, DP.BUSY, DP.PAUSED]: for a in [DA.SUCCESS, DA.FAILURE, DA.TICK, DA.DEACTIVATE, DA.SPAGHETTI]: + self.d._runner.run_script_for_event.reset_mock() self.d.action(a, p) assert_nocalls() self.assertEqual(self.d.state.__name__, self.d._state_inactive.__name__) def test_completed_print_not_in_queue(self): + self.d._runner.run_script_for_event.return_value = None self.d.action(DA.ACTIVATE, DP.BUSY) # -> start print -> printing self.d.action(DA.SUCCESS, DP.IDLE, "otherprint.gcode") # -> success
self.d.action(DA.TICK, DP.IDLE) # -> start_clearing @@ -66,10 +107,10 @@ def test_start_clearing_waits_for_idle(self): self.d.state = self.d._state_start_clearing self.d.action(DA.TICK, DP.BUSY) self.assertEqual(self.d.state.__name__, self.d._state_start_clearing.__name__) - self.d._runner.clear_bed.assert_not_called() + self.d._runner.run_script_for_event.assert_not_called() self.d.action(DA.TICK, DP.PAUSED) self.assertEqual(self.d.state.__name__, self.d._state_start_clearing.__name__) - self.d._runner.clear_bed.assert_not_called() + self.d._runner.run_script_for_event.assert_not_called() def test_idle_while_printing(self): self.d.state = self.d._state_printing @@ -115,14 +156,17 @@ def test_bed_clearing_cooldown_threshold(self): self.d.state = self.d._state_start_clearing self.d.action(DA.TICK, DP.IDLE, bed_temp=21) self.assertEqual(self.d.state.__name__, self.d._state_cooldown.__name__) + self.d._runner.run_script_for_event.reset_mock() self.d.action( DA.TICK, DP.IDLE, bed_temp=21 ) # -> stays in cooldown since bed temp too high self.assertEqual(self.d.state.__name__, self.d._state_cooldown.__name__) - self.d._runner.clear_bed.assert_not_called() + self.d._runner.run_script_for_event.assert_not_called() self.d.action(DA.TICK, DP.IDLE, bed_temp=19) # -> exits cooldown self.assertEqual(self.d.state.__name__, self.d._state_clearing.__name__) - self.d._runner.clear_bed.assert_called() + self.d._runner.run_script_for_event.assert_called_with( + CustomEvents.PRINT_SUCCESS + ) def test_bed_clearing_cooldown_timeout(self): self.d.set_managed_cooldown(True, 20, 60) @@ -131,13 +175,17 @@ def test_bed_clearing_cooldown_timeout(self): self.assertEqual(self.d.state.__name__, self.d._state_cooldown.__name__) orig_start = self.d.cooldown_start self.d.cooldown_start = orig_start - 60 * 59 # Still within timeout range + + self.d._runner.run_script_for_event.reset_mock() self.d.action(DA.TICK, DP.IDLE, bed_temp=21) self.assertEqual(self.d.state.__name__, 
self.d._state_cooldown.__name__) self.d.cooldown_start = orig_start - 60 * 61 - self.d._runner.clear_bed.assert_not_called() + self.d._runner.run_script_for_event.assert_not_called() self.d.action(DA.TICK, DP.IDLE, bed_temp=21) # exit due to timeout self.assertEqual(self.d.state.__name__, self.d._state_clearing.__name__) - self.d._runner.clear_bed.assert_called() + self.d._runner.run_script_for_event.assert_called_with( + CustomEvents.PRINT_SUCCESS + ) def test_finishing_failure(self): self.d.state = self.d._state_finishing @@ -145,6 +193,7 @@ def test_finishing_failure(self): self.assertEqual(self.d.state.__name__, self.d._state_inactive.__name__) def test_completed_last_print(self): + self.d._runner.run_script_for_event.return_value = None self.d.action(DA.ACTIVATE, DP.IDLE) # -> start_print -> printing self.d._runner.start_print.reset_mock() @@ -155,7 +204,7 @@ def test_completed_last_print(self): self.d.action(DA.TICK, DP.IDLE) # -> start_finishing self.d.printer_state_ts = time.time() - (Driver.PRINTING_IDLE_BREAKOUT_SEC + 1) self.d.action(DA.TICK, DP.IDLE) # -> finishing - self.d._runner.run_finish_script.assert_called() + self.d._runner.run_script_for_event.assert_called_with(CustomEvents.FINISH) self.assertEqual(self.d.state.__name__, self.d._state_finishing.__name__) self.d.action(DA.TICK, DP.IDLE) # -> idle @@ -173,10 +222,13 @@ def setUp(self): item = MagicMock(path="asdf") # return same item by default every time self.d.q.get_set_or_acquire.return_value = item self.d.q.get_set.return_value = item + self.d._runner.run_script_for_event.return_value = None self.d.action(DA.DEACTIVATE, DP.IDLE) self.d.action(DA.ACTIVATE, DP.IDLE) # -> start_print -> printing + self.d._runner.run_script_for_event.reset_mock() def test_success(self): + # Note: also implicitly tests when timelapse is disabled self.d._runner.start_print.reset_mock() self.d.action( @@ -191,18 +243,55 @@ def test_success(self): self.d.q.get_set.return_value = item2 self.d.action(DA.TICK, DP.IDLE) 
# -> clearing - self.d._runner.clear_bed.assert_called_once() + self.d._runner.run_script_for_event.assert_called_with( + CustomEvents.PRINT_SUCCESS + ) self.d.action(DA.SUCCESS, DP.IDLE) # -> start_print -> printing self.d._runner.start_print.assert_called_with(item2) + def test_success_waits_for_timelapse(self): + now = time.time() + self.d.action( + DA.SUCCESS, + DP.IDLE, + path=self.d.q.get_set.return_value.path, + timelapse_start_ts=now, + ) # -> success, but wait for timelapse + self.d.action(DA.TICK, DP.IDLE, timelapse_start_ts=now) # -> still success + self.assertEqual(self.d.state.__name__, self.d._state_success.__name__) + + item2 = MagicMock(path="basdf") + self.d.q.get_set_or_acquire.return_value = ( + item2 # manually move the supervisor forward in the queue + ) + self.d.q.get_set.return_value = item2 + + self.d.action(DA.TICK, DP.IDLE) # -> clearing + self.assertEqual(self.d.state.__name__, self.d._state_start_clearing.__name__) + + def test_success_timelapse_timeout(self): + now = time.time() + self.d.action( + DA.SUCCESS, + DP.IDLE, + path=self.d.q.get_set.return_value.path, + timelapse_start_ts=now - self.d.TIMELAPSE_WAIT_SEC - 1, + ) # -> success, but wait for timelapse + self.d.action( + DA.TICK, DP.IDLE, timelapse_start_ts=now - self.d.TIMELAPSE_WAIT_SEC - 1 + ) # -> timeout to clearing + self.assertEqual(self.d.state.__name__, self.d._state_start_clearing.__name__) + def test_paused_with_spaghetti_early_triggers_cancel(self): self.d.q.get_run.return_value = MagicMock( start=datetime.datetime.now() - datetime.timedelta(seconds=10) ) self.d.action(DA.SPAGHETTI, DP.BUSY) # -> spaghetti_recovery self.d.action(DA.TICK, DP.PAUSED) # -> cancel + failure - self.d._runner.cancel_print.assert_called() + self.d._runner.run_script_for_event.assert_called_with( + CustomEvents.PRINT_CANCEL + ) self.assertEqual(self.d.state.__name__, self.d._state_failure.__name__) def test_paused_with_spaghetti_late_waits_for_user(self): @@ -212,7 +301,7 @@ def 
test_paused_with_spaghetti_late_waits_for_user(self): ) self.d.action(DA.SPAGHETTI, DP.BUSY) # -> printing (ignore spaghetti) self.d.action(DA.TICK, DP.PAUSED) # -> paused - self.d._runner.cancel_print.assert_not_called() + self.d._runner.run_script_for_event.assert_not_called() self.assertEqual(self.d.state.__name__, self.d._state_paused.__name__) def test_paused_manually_early_waits_for_user(self): @@ -221,7 +310,7 @@ def test_paused_manually_early_waits_for_user(self): ) self.d.action(DA.TICK, DP.PAUSED) # -> paused self.d.action(DA.TICK, DP.PAUSED) # stay in paused state - self.d._runner.cancel_print.assert_not_called() + self.d._runner.run_script_for_event.assert_not_called() self.assertEqual(self.d.state.__name__, self.d._state_paused.__name__) def test_paused_manually_late_waits_for_user(self): @@ -230,26 +319,28 @@ def test_paused_manually_late_waits_for_user(self): ) self.d.action(DA.TICK, DP.PAUSED) # -> paused self.d.action(DA.TICK, DP.PAUSED) # stay in paused state - self.d._runner.cancel_print.assert_not_called() + self.d._runner.run_script_for_event.assert_not_called() self.assertEqual(self.d.state.__name__, self.d._state_paused.__name__) def test_paused_on_temp_file_falls_through(self): self.d.state = self.d._state_clearing # -> clearing self.d.action(DA.TICK, DP.PAUSED) - self.d._runner.cancel_print.assert_not_called() + self.d._runner.run_script_for_event.assert_not_called() self.assertEqual(self.d.state.__name__, self.d._state_clearing.__name__) self.d.state = self.d._state_finishing # -> finishing self.d.action(DA.TICK, DP.PAUSED) - self.d._runner.cancel_print.assert_not_called() + self.d._runner.run_script_for_event.assert_not_called() self.assertEqual(self.d.state.__name__, self.d._state_finishing.__name__) def test_user_deactivate_sets_inactive(self): self.d._runner.start_print.reset_mock() self.d.action(DA.DEACTIVATE, DP.IDLE) # -> inactive + self.d._runner.run_script_for_event.assert_called self.assertEqual(self.d.state.__name__, 
self.d._state_inactive.__name__) self.d._runner.start_print.assert_not_called() + self.d._runner.run_script_for_event.assert_called_with(CustomEvents.DEACTIVATE) self.d.q.end_run.assert_not_called() @@ -261,6 +352,7 @@ def setUp(self): logger=logging.getLogger(), ) self.d.set_retry_on_pause(True) + self.d._runner.run_script_for_event.return_value = None self.d.action(DA.DEACTIVATE, DP.IDLE) def _setItemMaterials(self, m): @@ -285,13 +377,23 @@ def test_tool1mat_none(self): self._setItemMaterials(["tool1mat"]) self.d.action(DA.ACTIVATE, DP.IDLE) self.d._runner.start_print.assert_not_called() - self.assertEqual(self.d.state.__name__, self.d._state_start_print.__name__) + self.d._runner.run_script_for_event.assert_called_with( + CustomEvents.AWAITING_MATERIAL + ) + self.assertEqual( + self.d.state.__name__, self.d._state_awaiting_material.__name__ + ) def test_tool1mat_wrong(self): self._setItemMaterials(["tool1mat"]) self.d.action(DA.ACTIVATE, DP.IDLE, materials=["tool0bad"]) self.d._runner.start_print.assert_not_called() - self.assertEqual(self.d.state.__name__, self.d._state_start_print.__name__) + self.d._runner.run_script_for_event.assert_called_with( + CustomEvents.AWAITING_MATERIAL + ) + self.assertEqual( + self.d.state.__name__, self.d._state_awaiting_material.__name__ + ) def test_tool1mat_ok(self): self._setItemMaterials(["tool1mat"]) @@ -315,4 +417,16 @@ def test_tool1mat_tool2mat_reversed(self): self._setItemMaterials(["tool1mat", "tool2mat"]) self.d.action(DA.ACTIVATE, DP.IDLE, materials=["tool2mat", "tool1mat"]) self.d._runner.start_print.assert_not_called() - self.assertEqual(self.d.state.__name__, self.d._state_start_print.__name__) + self.d._runner.run_script_for_event.assert_called_with( + CustomEvents.AWAITING_MATERIAL + ) + self.assertEqual( + self.d.state.__name__, self.d._state_awaiting_material.__name__ + ) + + def test_recovery(self): + self._setItemMaterials(["tool0mat"]) + self.d.action(DA.ACTIVATE, DP.IDLE, materials=["tool0bad"]) # awaiting 
+ self.d.action(DA.ACTIVATE, DP.IDLE, materials=["tool0mat"]) + self.d._runner.start_print.assert_called() + self.assertEqual(self.d.state.__name__, self.d._state_printing.__name__) diff --git a/continuousprint/integration_test.py b/continuousprint/integration_test.py index f9f9613..8d848cb 100644 --- a/continuousprint/integration_test.py +++ b/continuousprint/integration_test.py @@ -1,17 +1,22 @@ import unittest import datetime import time +import tempfile from unittest.mock import MagicMock, ANY from .driver import Driver, Action as DA, Printer as DP +from pathlib import Path import logging import traceback from .storage.database_test import DBTest -from .storage.database import DEFAULT_QUEUE, MODELS, populate as populate_db +from .storage.database import DEFAULT_QUEUE, MODELS, populate_queues from .storage import queries +from .storage.lan import LANJobView from .queues.multi import MultiQueue from .queues.local import LocalQueue from .queues.lan import LANQueue from .queues.abstract import Strategy +from .data import CustomEvents +from .script_runner import ScriptRunner from peewee import SqliteDatabase from collections import defaultdict from peerprint.lan_queue import LANPrintQueueBase @@ -38,6 +43,10 @@ def onupdate(): script_runner=MagicMock(), logger=logging.getLogger(), ) + + # Bypass running of scripts on activate, start print, deactivate etc. 
+ self.d._runner.run_script_for_event.return_value = None + self.d.set_retry_on_pause(True) self.d.action(DA.DEACTIVATE, DP.IDLE) @@ -60,8 +69,10 @@ def assert_from_printing_state(self, want_path, finishing=False): self.d.state.__name__, self.d._state_start_clearing.__name__ ) self.d.action(DA.TICK, DP.IDLE) # -> clearing - self.d._runner.clear_bed.assert_called() - self.d._runner.clear_bed.reset_mock() + self.d._runner.run_script_for_event.assert_called_with( + CustomEvents.PRINT_SUCCESS + ) + self.d._runner.run_script_for_event.reset_mock() self.d.action(DA.SUCCESS, DP.IDLE) # -> start_print else: # Finishing self.d.action(DA.TICK, DP.IDLE) # -> start_finishing @@ -69,8 +80,10 @@ def assert_from_printing_state(self, want_path, finishing=False): self.d.state.__name__, self.d._state_start_finishing.__name__ ) self.d.action(DA.TICK, DP.IDLE) # -> finishing - self.d._runner.run_finish_script.assert_called() - self.d._runner.run_finish_script.reset_mock() + self.d._runner.run_script_for_event.assert_called_with( + CustomEvents.FINISH + ) + self.d._runner.run_script_for_event.reset_mock() self.d.action(DA.SUCCESS, DP.IDLE) # -> inactive self.assertEqual(self.d.state.__name__, self.d._state_idle.__name__) except AssertionError as e: @@ -103,7 +116,9 @@ def test_retries_failure(self): self.d.action(DA.ACTIVATE, DP.IDLE) # -> start_print -> printing self.d.action(DA.SPAGHETTI, DP.BUSY) # -> spaghetti_recovery self.d.action(DA.TICK, DP.PAUSED) # -> cancel + failure - self.d._runner.cancel_print.assert_called() + self.d._runner.run_script_for_event.assert_called_with( + CustomEvents.PRINT_CANCEL + ) self.assertEqual(self.d.state.__name__, self.d._state_failure.__name__) def test_multi_job(self): @@ -164,6 +179,74 @@ def test_completes_job_in_order(self): self.assert_from_printing_state("b.gcode", finishing=True) +class TestDriver(DBTest): + def setUp(self): + super().setUp() + + def onupdate(): + pass + + self.fm = MagicMock() + self.s = ScriptRunner( + msg=MagicMock(), + 
file_manager=self.fm, + logger=logging.getLogger(), + printer=MagicMock(), + refresh_ui_state=MagicMock(), + fire_event=MagicMock(), + ) + self.s._get_user = lambda: "foo" + self.s._wrap_stream = MagicMock(return_value=None) + self.mq = MultiQueue(queries, Strategy.IN_ORDER, onupdate) + self.d = Driver( + queue=self.mq, + script_runner=self.s, + logger=logging.getLogger(), + ) + self.d.set_retry_on_pause(True) + + def _setup_condition(self, cond, path=None): + self.d.state = self.d._state_inactive + queries.assignAutomation( + dict(foo="G0 X20"), + dict(bar=cond), + {CustomEvents.ACTIVATE.event: [dict(script="foo", preprocessor="bar")]}, + ) + self.d.action(DA.ACTIVATE, DP.IDLE, path=path) + + def test_conditional_true(self): + self._setup_condition("2 + 2 == 4") + self.assertEqual(self.d.state.__name__, self.d._state_activating.__name__) + + def test_conditional_false(self): + self._setup_condition("2 + 2 == 5") + self.assertEqual(self.d.state.__name__, self.d._state_idle.__name__) + + def test_conditional_error(self): + self._setup_condition("1 / 0") + # Pauses via script run + self.assertEqual(self.d.state.__name__, self.d._state_activating.__name__) + + def test_conditional_print_volume(self): + depthpath = "metadata['analysis']['dimensions']['depth']" + self.fm.file_exists.return_value = True + self.fm.has_analysis.return_value = True + self.fm.get_metadata.return_value = dict( + analysis=dict(dimensions=dict(depth=39)) + ) + + self._setup_condition(f"{depthpath} < 40", path="foo.gcode") + self.assertEqual(self.d.state.__name__, self.d._state_activating.__name__) + + self._setup_condition(f"{depthpath} > 39", path="foo.gcode") + self.assertEqual(self.d.state.__name__, self.d._state_idle.__name__) + + def test_external_symbols(self): + self.s.set_external_symbols(dict(foo="bar")) + self._setup_condition("external['foo'] == 'bar'", path="foo.gcode") + self.assertEqual(self.d.state.__name__, self.d._state_activating.__name__) + + class LocalLockManager: def 
__init__(self, locks, ns): self.locks = locks @@ -261,18 +344,22 @@ def onupdate(): self.locks = {} self.peers = [] + lqpeers = {} + lqjobs = TestReplDict(lambda a, b: None) for i, db in enumerate(self.dbs): with db.bind_ctx(MODELS): - populate_db() + populate_queues() + fsm = MagicMock(host="fsaddr", port=0) + profile = dict(name="profile") lq = LANQueue( "LAN", f"peer{i}:{12345+i}", logging.getLogger(f"peer{i}:LAN"), Strategy.IN_ORDER, onupdate, - MagicMock(), - dict(name="profile"), - lambda path: path, + fsm, + profile, + lambda path, sd: path, ) mq = MultiQueue(queries, Strategy.IN_ORDER, onupdate) mq.add(lq.ns, lq) @@ -281,17 +368,16 @@ def onupdate(): script_runner=MagicMock(), logger=logging.getLogger(f"peer{i}:Driver"), ) + d._runner.run_script_for_event.return_value = None d.set_retry_on_pause(True) d.action(DA.DEACTIVATE, DP.IDLE) lq.lan.q = LANPrintQueueBase( lq.ns, lq.addr, MagicMock(), logging.getLogger("lantestbase") ) lq.lan.q.locks = LocalLockManager(self.locks, f"peer{i}") - lq.lan.q.jobs = TestReplDict(lambda a, b: None) - lq.lan.q.peers = self.peers - if i > 0: - lq.lan.q.peers = self.peers[0][2].lan.q.peers - lq.lan.q.jobs = self.peers[0][2].lan.q.jobs + lq.lan.q.jobs = lqjobs + lq.lan.q.peers = lqpeers + lq.update_peer_state(lq.addr, "status", "run", profile) self.peers.append((d, mq, lq, db)) def test_ordered_acquisition(self): @@ -347,6 +433,48 @@ def test_ordered_acquisition(self): d2.action(DA.SUCCESS, DP.IDLE) # -> idle self.assertEqual(d2.state.__name__, d2._state_idle.__name__) + def test_non_local_edit(self): + (d1, _, lq1, db1) = self.peers[0] + (d2, _, lq2, db2) = self.peers[1] + with tempfile.TemporaryDirectory() as tdir: + (Path(tdir) / "test.gcode").touch() + j = LANJobView( + dict( + id="jobhash", + name="job", + created=0, + sets=[ + dict( + path="test.gcode", + count=1, + remaining=1, + profiles=["profile"], + ), + ], + count=1, + remaining=1, + peer_=None, + ), + lq1, + ) + lq1._path_on_disk = lambda p, sd: str(Path(tdir) / p) 
+ lq1.import_job_from_view(j, j.id) + lq2._fileshare.post.assert_not_called() + + # LQ2 edits the job + lq2._fileshare.fetch.return_value = str(Path(tdir) / "unpack/") + (Path(tdir) / "unpack").mkdir() + lq2dest = Path(tdir) / "unpack/test.gcode" + lq2dest.touch() + lq2.edit_job("jobhash", dict(draft=True)) + + lq2._fileshare.post.assert_called_once() + # Job posts with lan 2 address, from pov of lq1 + self.assertEqual(list(lq1.lan.q.jobs.values())[0][0], lq2.addr) + # Uses resolved file path + c = lq2._fileshare.post.call_args[0] + self.assertEqual(c[1], {str(lq2dest): str(lq2dest)}) + if __name__ == "__main__": unittest.main() diff --git a/continuousprint/plugin.py b/continuousprint/plugin.py index bcb194a..1c98ec7 100644 --- a/continuousprint/plugin.py +++ b/continuousprint/plugin.py @@ -23,7 +23,8 @@ from .queues.abstract import Strategy from .storage.database import ( migrateFromSettings, - init as init_db, + migrateScriptsFromSettings, + init_db, DEFAULT_QUEUE, ARCHIVE_QUEUE, ) @@ -31,8 +32,8 @@ PRINTER_PROFILES, GCODE_SCRIPTS, Keys, + TEMP_FILE_DIR, PRINT_FILE_DIR, - TEMP_FILES, ) from .api import ContinuousPrintAPI from .script_runner import ScriptRunner @@ -49,6 +50,7 @@ class CPQPlugin(ContinuousPrintAPI): ) RECONNECT_WINDOW_SIZE = 5.0 MAX_WINDOW_EXP = 6 + GET_ADDR_TIMEOUT = 3 CPQ_ANALYSIS_FINISHED = "CPQ_ANALYSIS_FINISHED" def __init__( @@ -77,6 +79,8 @@ def __init__( self._reconnect_attempts = 0 self._next_reconnect = 0 self._fire_event = fire_event + self._exceptions = [] + self._timelapse_start_ts = None def start(self): self._setup_thirdparty_plugin_integration() @@ -117,25 +121,61 @@ def resume_action(self): self._update(DA.ACTIVATE) self._sync_state() - def get_open_port(self): + def _can_bind_addr(self, addr): + try: + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.bind(addr) + except OSError: + return False + finally: + s.close() + return True + + def get_local_addr(self): # https://stackoverflow.com/a/2838309 # Note that this is 
vulnerable to race conditions in that # the port is open when it's assigned, but could be reassigned # before the caller can use it. - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.bind(("", 0)) - s.listen(1) - port = s.getsockname()[1] - s.close() - return port - - def get_local_ip(self): + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.settimeout(CPQPlugin.GET_ADDR_TIMEOUT) + checkaddr = tuple( + [ + self._settings.global_get(["server", "onlineCheck", v]) + for v in ("host", "port") + ] + ) + try: + s.connect(checkaddr) + result = s.getsockname() + finally: + # Note: close has no effect on already closed socket + s.close() + + if self._can_bind_addr(result): + return f"{result[0]}:{result[1]}" + + # For whatever reason, client-based local IP resolution can sometimes still fail to bind. + # In this case, fall back to MDNS based resolution # https://stackoverflow.com/a/57355707 + self._logger.warning( + "Online check based IP resolution failed to bind; attempting MDNS local IP resolution" + ) hostname = socket.gethostname() try: - return socket.gethostbyname(f"{hostname}.local") + local_ip = socket.gethostbyname(f"{hostname}.local") except socket.gaierror: - return socket.gethostbyname(hostname) + local_ip = socket.gethostbyname(hostname) + + # Find open port: https://stackoverflow.com/a/2838309 + # This will raise OSError if it cannot bind to that address either + try: + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.bind((local_ip, 0)) + s.listen(1) + port = s.getsockname()[1] + finally: + s.close() + return f"{local_ip}:{port}" def _add_set(self, path, sd, draft=True, profiles=[]): # We may need to delay adding a file if it hasn't yet finished analysis @@ -190,6 +230,9 @@ def _preprocess_set(self, data): data["profiles"] = [prof] return data + def _set_external_symbols(self, data): + self._runner.set_external_symbols(data) + def _path_on_disk(self, path: str, sd: bool): try: return self._file_manager.path_on_disk( @@ -205,6 
+248,15 @@ def _msg(self, data): # See continuousprint_viewmodel.js onDataUpdaterPluginMessage self._plugin_manager.send_plugin_message(self._identifier, data) + def get_exceptions(self): + return self._exceptions + + def _exception_msg(self, msg): + self._logger.error(msg) + self._exceptions.append(msg) + self._logger.error(traceback.format_exc()) + self._msg(dict(msg=msg + "\n\nSee sysinfo logs for details", type="danger")) + def _setup_thirdparty_plugin_integration(self): # Turn on "restart on pause" when Obico plugin is detected (must be version 1.8.11 or higher for custom event hook) if getattr(octoprint.events.Events, "PLUGIN_OBICO_COMMAND", None) is not None: @@ -246,28 +298,65 @@ def _init_fileshare(self, fs_cls=Fileshare): self.fileshare_dir = self._path_on_disk( f"{PRINT_FILE_DIR}/fileshare/", sd=False ) - fileshare_addr = f"{self.get_local_ip()}:0" + try: + fileshare_addr = self.get_local_addr() + except OSError: + self._exception_msg( + "Failed to find a local addr for LAN fileshare; LAN queues will be disabled." + ) + self._fileshare = None + return + self._logger.info(f"Starting fileshare with address {fileshare_addr}") self._fileshare = fs_cls(fileshare_addr, self.fileshare_dir, self._logger) - self._fileshare.connect() + try: + self._fileshare.connect() + self._logger.info( + f"Fileshare listening on {self._fileshare.host}:{self._fileshare.port}" + ) + except OSError: + self._exception_msg( + "Failed to bind Fileshare HTTP server; hosting LAN jobs will fail, but fetching jobs may still work." 
+ ) def _init_db(self): init_db( - db_path=Path(self._data_folder) / "queue.sqlite3", + queues_db=Path(self._data_folder) / "queue.sqlite3", + automation_db=Path(self._data_folder) / "automation.sqlite3", logger=self._logger, ) # Migrate from old JSON state if needed - state_data = self._get_key(Keys.QUEUE) + state_data = self._get_key(Keys.QUEUE_DEPRECATED) try: if state_data is not None and state_data != "[]": settings_state = json.loads(state_data) migrateFromSettings(settings_state) - self._get_key(Keys.QUEUE) + self._set_key(Keys.QUEUE_DEPRECATED, None) + self._logger.info("Migrated queue data from settings to DB") except Exception: self._logger.error(f"Could not migrate old json state: {state_data}") self._logger.error(traceback.format_exc()) + # Migrate from settings scripts if needed + dep_scripts = [ + Keys.CLEARING_SCRIPT_DEPRECATED, + Keys.FINISHED_SCRIPT_DEPRECATED, + Keys.BED_COOLDOWN_SCRIPT_DEPRECATED, + ] + script_data = [self._get_key(k) for k in dep_scripts] + try: + if True in [s is not None for s in script_data]: + migrateScriptsFromSettings(*script_data) + for k in dep_scripts: + self._set_key(k, None) + self._logger.info("Migrated scripts from settings to DB") + except Exception: + self._logger.error( + f"Could not migrate from settings scripts: {script_data}" + ) + self._logger.error(traceback.format_exc()) + self._queries.clearOldState() def _init_queues(self, lancls=LANQueue, localcls=LocalQueue): @@ -282,9 +371,7 @@ def _init_queues(self, lancls=LANQueue, localcls=LocalQueue): try: lq = lancls( q.name, - q.addr - if q.addr.lower() != "auto" - else f"{self.get_local_ip()}:{self.get_open_port()}", + q.addr if q.addr.lower() != "auto" else self.get_local_addr(), self._logger, Strategy.IN_ORDER, self._on_queue_update, @@ -294,10 +381,11 @@ def _init_queues(self, lancls=LANQueue, localcls=LocalQueue): ) lq.connect() self.q.add(q.name, lq) - except ValueError: - self._logger.error( - f"Unable to join network queue (name {q.name}, addr {q.addr}) 
due to ValueError" + except Exception: + self._exception_msg( + f'Unable to join network queue "{q.name}" with address {q.addr}' ) + elif q.name != ARCHIVE_QUEUE: self.q.add( q.name, @@ -314,7 +402,6 @@ def _init_queues(self, lancls=LANQueue, localcls=LocalQueue): def _init_driver(self, srcls=ScriptRunner, dcls=Driver): self._runner = srcls( self.popup, - self._get_key, self._file_manager, self._logger, self._printer, @@ -409,7 +496,7 @@ def _enqueue_analysis_backlog(self): self._logger.info(f"Enqueued {counter} files for CPQ analysis") def _enqueue(self, path, high_priority=False): - if path in TEMP_FILES.values(): + if path.startswith(TEMP_FILE_DIR): return False # Exclude temp files from analysis queue_entry = QueueEntry( name=path.split("/")[-1], @@ -433,6 +520,9 @@ def _on_analysis_finished(self, entry, result): self.on_event(self.CPQ_ANALYSIS_FINISHED, dict(path=entry.path, result=result)) def _cleanup_fileshare(self): + if not os.path.exists(self.fileshare_dir): + return 0 + # This cleans up all non-useful fileshare files across all network queues, so they aren't just taking up space. + # First we collect all non-local queue items hosted by us - these are excluded from cleanup as someone may need to fetch them.
keep_hashes = set() @@ -445,6 +535,7 @@ def _cleanup_fileshare(self): # Loop through all .gjob and .gcode files in base directory and delete them if they aren't referenced or acquired by us n = 0 + for d in os.listdir(self.fileshare_dir): name, suffix = os.path.splitext(d) if suffix not in ("", ".gjob", ".gcode", ".gco"): @@ -539,12 +630,11 @@ def on_event(self, event, payload): self._sync_state() else: return - if event == Events.MOVIE_DONE: + self._timelapse_start_ts = None # Optionally delete time-lapses created from bed clearing/finishing scripts - temp_files_base = [f.split("/")[-1] for f in TEMP_FILES.values()] if ( - payload["gcode"] in temp_files_base + payload["gcode"].startswith(TEMP_FILE_DIR) and self._get_key(Keys.AUTOMATION_TIMELAPSE_ACTION) == "auto_remove" ): if self._delete_timelapse(payload["movie"]): @@ -561,9 +651,17 @@ def on_event(self, event, payload): f"Annotated run of {payload['gcode']} with timelapse details" ) self._sync_history() + self._update(DA.TICK) + return + elif event == Events.MOVIE_FAILED: + self._timelapse_start_ts = None return - elif event == Events.PRINT_DONE: + # Timelapse TS is set here instead of on MOVIE_RENDERING, because we + # must know when we're rendering *before* calling _update(SUCCESS) + # so the driver can appropriately wait for the timelapse to finish. 
+ self._timelapse_start_ts = time.time() + self._update(DA.SUCCESS) n = self._cleanup_fileshare() if n > 0: @@ -638,7 +736,14 @@ def _update(self, a: DA): if bed_temp is not None: bed_temp = bed_temp.get("actual", 0) - if self.d.action(a, p, path, materials, bed_temp): + timelapse_start_ts = None + timelapsesEnabled = ( + self._settings.global_get(["webcam", "timelapse", "type"]) != "off" + ) + if timelapsesEnabled: + timelapse_start_ts = self._timelapse_start_ts + + if self.d.action(a, p, path, materials, bed_temp, timelapse_start_ts): self._sync_state() run = self.q.get_run() @@ -688,9 +793,7 @@ def _commit_queues(self, added, removed): try: lq = LANQueue( a["name"], - a["addr"] - if a["addr"].lower() != "auto" - else f"{self.get_local_ip()}:{self.get_open_port()}", + a["addr"] if a["addr"].lower() != "auto" else self.get_local_addr(), self._logger, Strategy.IN_ORDER, self._on_queue_update, diff --git a/continuousprint/plugin_test.py b/continuousprint/plugin_test.py index 01fc8b6..abea5f1 100644 --- a/continuousprint/plugin_test.py +++ b/continuousprint/plugin_test.py @@ -11,7 +11,7 @@ import logging import tempfile import json -from .data import Keys, TEMP_FILES +from .data import Keys, TEMP_FILE_DIR from .plugin import CPQPlugin # logging.basicConfig(level=logging.DEBUG) @@ -27,9 +27,15 @@ def save(self): def get(self, k): return self.s.get(k[0]) + def global_get(self, gk): + return self.get([":".join(gk)]) + def set(self, k, v): self.s[k[0]] = v + def global_set(self, gk, v): + return self.set([":".join(gk)], v) + def mockplugin(): return CPQPlugin( @@ -84,10 +90,22 @@ def testDBNew(self): p._data_folder = td p._init_db() + @patch("continuousprint.plugin.migrateScriptsFromSettings") + def testDBMigrateScripts(self, msfs): + p = mockplugin() + p._set_key(Keys.CLEARING_SCRIPT_DEPRECATED, "s1") + p._set_key(Keys.FINISHED_SCRIPT_DEPRECATED, "s2") + p._set_key(Keys.BED_COOLDOWN_SCRIPT_DEPRECATED, "s3") + with tempfile.TemporaryDirectory() as td: + p._data_folder = 
td + p._init_db() + # Ensure we're calling with the script body, not just the event name + msfs.assert_called_with("s1", "s2", "s3") + def testDBWithLegacySettings(self): p = mockplugin() p._set_key( - Keys.QUEUE, + Keys.QUEUE_DEPRECATED, json.dumps( [ { @@ -113,13 +131,28 @@ def testDBWithLegacySettings(self): def testFileshare(self): p = mockplugin() fs = MagicMock() - p.get_local_ip = MagicMock(return_value="111.111.111.111") + p.get_local_addr = lambda: ("111.111.111.111:0") p._file_manager.path_on_disk.return_value = "/testpath" p._init_fileshare(fs_cls=fs) fs.assert_called_with("111.111.111.111:0", "/testpath", logging.getLogger()) + def testFileshareAddrFailure(self): + p = mockplugin() + fs = MagicMock() + p.get_local_addr = MagicMock(side_effect=[OSError("testing")]) + p._init_fileshare(fs_cls=fs) # Does not raise exception + self.assertEqual(p._fileshare, None) + + def testFileshareConnectFailure(self): + p = mockplugin() + fs = MagicMock() + p.get_local_addr = lambda: "111.111.111.111:0" + fs.connect.side_effect = OSError("testing") + p._init_fileshare(fs_cls=fs) # Does not raise exception + self.assertEqual(p._fileshare, fs()) + def testQueues(self): p = mockplugin() QT = namedtuple("MockQueue", ["name", "addr"]) @@ -155,7 +188,7 @@ def setUp(self): def testTick(self): self.p.tick() - self.p.d.action.assert_called_with(DA.TICK, ANY, ANY, ANY, ANY) + self.p.d.action.assert_called_with(DA.TICK, ANY, ANY, ANY, ANY, ANY) def testTickExceptionHandled(self): self.p.d.action.side_effect = Exception( @@ -238,7 +271,7 @@ def testTempFileMovieDone(self): self.p._delete_timelapse = MagicMock() self.p.on_event( Events.MOVIE_DONE, - dict(gcode=list(TEMP_FILES.values())[0].split("/")[-1], movie="test.mp4"), + dict(gcode=TEMP_FILE_DIR + "/test.gcode", movie="test.mp4"), ) self.p._delete_timelapse.assert_called_with("test.mp4") @@ -250,19 +283,19 @@ def testQueueRunMovieDone(self): def testPrintDone(self): self.p._cleanup_fileshare = lambda: 0 
self.p.on_event(Events.PRINT_DONE, dict()) - self.p.d.action.assert_called_with(DA.SUCCESS, ANY, ANY, ANY, ANY) + self.p.d.action.assert_called_with(DA.SUCCESS, ANY, ANY, ANY, ANY, ANY) def testPrintFailed(self): self.p.on_event(Events.PRINT_FAILED, dict()) - self.p.d.action.assert_called_with(DA.FAILURE, ANY, ANY, ANY, ANY) + self.p.d.action.assert_called_with(DA.FAILURE, ANY, ANY, ANY, ANY, ANY) def testPrintCancelledByUser(self): self.p.on_event(Events.PRINT_CANCELLED, dict(user="admin")) - self.p.d.action.assert_called_with(DA.DEACTIVATE, ANY, ANY, ANY, ANY) + self.p.d.action.assert_called_with(DA.DEACTIVATE, ANY, ANY, ANY, ANY, ANY) def testPrintCancelledBySystem(self): self.p.on_event(Events.PRINT_CANCELLED, dict()) - self.p.d.action.assert_called_with(DA.TICK, ANY, ANY, ANY, ANY) + self.p.d.action.assert_called_with(DA.TICK, ANY, ANY, ANY, ANY, ANY) def testObicoPauseCommand(self): self.p._printer.get_current_job.return_value = dict( @@ -272,7 +305,7 @@ def testObicoPauseCommand(self): self.p.EVENT_OBICO_COMMAND = "obico_cmd" self.p.on_event("obico_cmd", dict(cmd="pause", initiator="system")) - self.p.d.action.assert_called_with(DA.SPAGHETTI, ANY, ANY, ANY, ANY) + self.p.d.action.assert_called_with(DA.SPAGHETTI, ANY, ANY, ANY, ANY, ANY) def testObicoPauseByUser(self): # User pause events (e.g. 
through the Obico UI) should not trigger automation @@ -288,12 +321,12 @@ def testObicoPauseByUser(self): def testSpoolSelected(self): self.p.EVENT_SPOOL_SELECTED = "spool_selected" self.p.on_event("spool_selected", dict()) - self.p.d.action.assert_called_with(DA.TICK, ANY, ANY, ANY, ANY) + self.p.d.action.assert_called_with(DA.TICK, ANY, ANY, ANY, ANY, ANY) def testSpoolDeselected(self): self.p.EVENT_SPOOL_DESELECTED = "spool_desel" self.p.on_event("spool_desel", dict()) - self.p.d.action.assert_called_with(DA.TICK, ANY, ANY, ANY, ANY) + self.p.d.action.assert_called_with(DA.TICK, ANY, ANY, ANY, ANY, ANY) def testPrintPaused(self): self.p._printer.get_current_job.return_value = dict( @@ -301,7 +334,7 @@ def testPrintPaused(self): ) self.p.d.current_path.return_value = "test.gcode" self.p.on_event(Events.PRINT_PAUSED, dict()) - self.p.d.action.assert_called_with(DA.TICK, ANY, ANY, ANY, ANY) + self.p.d.action.assert_called_with(DA.TICK, ANY, ANY, ANY, ANY, ANY) def testPrintResumed(self): self.p._printer.get_current_job.return_value = dict( @@ -309,12 +342,12 @@ def testPrintResumed(self): ) self.p.d.current_path.return_value = "test.gcode" self.p.on_event(Events.PRINT_RESUMED, dict()) - self.p.d.action.assert_called_with(DA.TICK, ANY, ANY, ANY, ANY) + self.p.d.action.assert_called_with(DA.TICK, ANY, ANY, ANY, ANY, ANY) def testPrinterOperational(self): self.p._printer.get_state_id.return_value = "OPERATIONAL" self.p.on_event(Events.PRINTER_STATE_CHANGED, dict()) - self.p.d.action.assert_called_with(DA.TICK, ANY, ANY, ANY, ANY) + self.p.d.action.assert_called_with(DA.TICK, ANY, ANY, ANY, ANY, ANY) def testSettingsUpdated(self): self.p.on_event(Events.SETTINGS_UPDATED, dict()) @@ -551,3 +584,25 @@ def testCleanupWithFiles(self): self.assertTrue((p / f"{n}.gcode").exists()) self.assertFalse((p / "c.gcode").exists()) self.assertFalse((p / "d").exists()) + + +class TestLocalAddressResolution(unittest.TestCase): + def setUp(self): + self.p = mockplugin() + + 
@patch("continuousprint.plugin.socket") + def testResolutionViaCheckAddrOK(self, msock): + self.p._settings.global_set(["server", "onlineCheck", "host"], "checkhost") + self.p._settings.global_set(["server", "onlineCheck", "port"], 5678) + s = msock.socket() + s.getsockname.return_value = ("1.2.3.4", "1234") + self.assertEqual(self.p.get_local_addr(), "1.2.3.4:1234") + s.connect.assert_called_with(("checkhost", 5678)) + + @patch("continuousprint.plugin.socket") + def testResolutionFailoverToMDNS(self, msock): + self.p._can_bind_addr = lambda a: False + msock.gethostbyname.return_value = "1.2.3.4" + s = msock.socket() + s.getsockname.return_value = ("ignored", "1234") + self.assertEqual(self.p.get_local_addr(), "1.2.3.4:1234") diff --git a/continuousprint/queues/abstract_test.py b/continuousprint/queues/abstract_test.py index ab287cf..979cdb7 100644 --- a/continuousprint/queues/abstract_test.py +++ b/continuousprint/queues/abstract_test.py @@ -14,6 +14,7 @@ def testJob(inst): s.rank = 0 s.sd = False s.material_keys = "" + s.metadata = None s.profile_keys = "profile" s.completed = 0 s.save = lambda: True diff --git a/continuousprint/queues/lan.py b/continuousprint/queues/lan.py index d1ca046..47ff450 100644 --- a/continuousprint/queues/lan.py +++ b/continuousprint/queues/lan.py @@ -309,6 +309,7 @@ def _gen_uuid(self) -> str: def edit_job(self, job_id, data) -> bool: # For lan queues, "editing" a job is basically resubmission of the whole thing. # This is because the backing .gjob format is a single file containing the full manifest. + j = self.get_job_view(job_id) for (k, v) in data.items(): if k in ("id", "peer_", "queue"): @@ -320,6 +321,13 @@ def edit_job(self, job_id, data) -> bool: else: setattr(j, k, v) + # We must resolve the set paths so we have them locally, as editing can + # also occur on servers other than the one that submitted the job. 
+ j.remap_set_paths() + + # We are also now the source of this job + j.peer = self.addr + # Exchange the old job for the new job (reuse job ID) jid = self.import_job_from_view(j, j.id) return self._get_job(jid) diff --git a/continuousprint/queues/lan_test.py b/continuousprint/queues/lan_test.py index d5cafc3..4bea19c 100644 --- a/continuousprint/queues/lan_test.py +++ b/continuousprint/queues/lan_test.py @@ -20,12 +20,17 @@ class LANQueueTest(unittest.TestCase, PeerPrintLANTest): def setUp(self): PeerPrintLANTest.setUp(self) # Generate peerprint LANQueue as self.q self.q.q.syncPeer( - dict(profile=dict(name="profile")), addr=self.q.q.addr + dict( + profile=dict(name="profile"), + fs_addr="mock_fs_addr", + ), + addr=self.q.q.addr, ) # Helps pass validation ppq = self.q # Rename to make way for CPQ LANQueue self.ucb = MagicMock() self.fs = MagicMock() + self.fs.fetch.return_value = "asdf.gcode" self.q = LANQueue( "ns", "localhost:1234", diff --git a/continuousprint/queues/local_test.py b/continuousprint/queues/local_test.py index 386aa88..1153b40 100644 --- a/continuousprint/queues/local_test.py +++ b/continuousprint/queues/local_test.py @@ -1,6 +1,6 @@ import unittest import logging -from ..storage.database_test import DBTest +from ..storage.database_test import QueuesDBTest from ..storage import queries from ..storage.lan import LANJobView from ..storage.database import JobView @@ -14,13 +14,13 @@ from .local import LocalQueue from dataclasses import dataclass, asdict -logging.basicConfig(level=logging.DEBUG) +# logging.basicConfig(level=logging.DEBUG) -class TestAbstractImpl(AbstractQueueTests, DBTest): +class TestAbstractImpl(AbstractQueueTests, QueuesDBTest): # See abstract_test.py for actual test cases def setUp(self): - DBTest.setUp(self) + QueuesDBTest.setUp(self) self.q = LocalQueue( queries, "local", @@ -33,10 +33,10 @@ def setUp(self): self.q._set_path_exists = lambda p: True -class TestEditableImpl(EditableQueueTests, DBTest): +class 
TestEditableImpl(EditableQueueTests, QueuesDBTest): # See abstract_test.py for actual test cases def setUp(self): - DBTest.setUp(self) + QueuesDBTest.setUp(self) self.q = LocalQueue( queries, "local", @@ -115,7 +115,7 @@ def test_acquire_failed_no_jobs(self): self.assertEqual(self.q.acquire(), False) def test_import_job(self): - pass # TODO + self.skipTest("TODO") class TestLocalQueueInOrderInitial(unittest.TestCase): @@ -140,7 +140,7 @@ def test_init_already_acquired(self): self.assertEqual(self.q.get_set(), self.s) def test_mv(self): - pass # TODO + self.skipTest("TODO") def test_acquire_2x(self): # Second acquire should do nothing, return True @@ -181,13 +181,12 @@ def as_dict(self): ) -# TODO test SD card behavior on importing/exporting and printing -# class TestSD(unittest.TestCase): -# def testSDExport(self): -# raise NotImplementedError -# -# def testSDImport(self): -# raise NotImplementedError -# -# def testSDPrintExists(self): -# raise NotImplementedError +class TestSD(unittest.TestCase): + def testSDExport(self): + self.skipTest("TODO") + + def testSDImport(self): + self.skipTest("TODO") + + def testSDPrintExists(self): + self.skipTest("TODO") diff --git a/continuousprint/script_runner.py b/continuousprint/script_runner.py index 78eef1c..73e4f2f 100644 --- a/continuousprint/script_runner.py +++ b/continuousprint/script_runner.py @@ -1,18 +1,21 @@ import time -from io import BytesIO +from io import BytesIO, StringIO +from asteval import Interpreter +from pathlib import Path from octoprint.filemanager.util import StreamWrapper from octoprint.filemanager.destinations import FileDestinations from octoprint.printer import InvalidFileLocation, InvalidFileType +from octoprint.server import current_user from .storage.lan import ResolveError -from .data import Keys, TEMP_FILES, CustomEvents +from .data import TEMP_FILE_DIR, CustomEvents +from .storage.queries import genEventScript class ScriptRunner: def __init__( self, msg, - get_key, file_manager, logger, 
printer, @@ -20,20 +23,29 @@ def __init__( fire_event, ): self._msg = msg - self._get_key = get_key self._file_manager = file_manager self._logger = logger self._printer = printer self._refresh_ui_state = refresh_ui_state self._fire_event = fire_event + self._symbols = dict( + current=dict(), + external=dict(), + metadata=dict(), + ) + + def _get_user(self): + try: + return current_user.get_name() + except AttributeError: + return None def _wrap_stream(self, name, gcode): return StreamWrapper(name, BytesIO(gcode.encode("utf-8"))) - def _execute_gcode(self, key): - gcode = self._get_key(key) - file_wrapper = self._wrap_stream(key.setting, gcode) - path = TEMP_FILES[key.setting] + def _execute_gcode(self, evt, gcode): + file_wrapper = self._wrap_stream(evt.event, gcode) + path = str(Path(TEMP_FILE_DIR) / f"{evt.event}.gcode") added_file = self._file_manager.add_file( FileDestinations.LOCAL, path, @@ -41,30 +53,91 @@ def _execute_gcode(self, key): allow_overwrite=True, ) self._logger.info(f"Wrote file {path}") - self._printer.select_file(path, sd=False, printAfterSelect=True) + self._printer.select_file( + path, sd=False, printAfterSelect=True, user=self._get_user() + ) return added_file - def run_finish_script(self): - self._msg("Print Queue Complete", type="complete") - result = self._execute_gcode(Keys.FINISHED_SCRIPT) - self._fire_event(CustomEvents.FINISH) - return result + def _do_msg(self, evt, running=False): + if evt == CustomEvents.FINISH: + self._msg("Print Queue Complete", type="complete") + elif evt == CustomEvents.PRINT_CANCEL: + self._msg("Print cancelled", type="error") + + if running: + if evt == CustomEvents.COOLDOWN: + self._msg("Running bed cooldown script") + elif evt == CustomEvents.PRINT_SUCCESS: + self._msg("Running success script") + elif evt == CustomEvents.AWAITING_MATERIAL: + self._msg("Running script while awaiting material") + + def set_current_symbols(self, symbols): + last_path = self._symbols["current"].get("path") + 
self._symbols["current"] = symbols.copy() - def cancel_print(self): - self._msg("Print cancelled", type="error") - self._printer.cancel_print() - self._fire_event(CustomEvents.CANCEL) + # Current state can change metadata + path = self._symbols["current"].get("path") + if ( + path is not None + and path != last_path + and self._file_manager.file_exists(FileDestinations.LOCAL, path) + and self._file_manager.has_analysis(FileDestinations.LOCAL, path) + ): + # See https://docs.octoprint.org/en/master/modules/filemanager.html#octoprint.filemanager.analysis.GcodeAnalysisQueue + # for analysis values - or `.metadata.json` within .octoprint/uploads + self._symbols["metadata"] = self._file_manager.get_metadata( + FileDestinations.LOCAL, path + ) - def start_cooldown(self): - self._msg("Running bed cooldown script") - self._execute_gcode(Keys.BED_COOLDOWN_SCRIPT) - self._printer.set_temperature("bed", 0) # turn bed off - self._fire_event(CustomEvents.COOLDOWN) + def set_external_symbols(self, symbols): + assert type(symbols) is dict + self._symbols["external"] = symbols - def clear_bed(self): - self._msg("Clearing bed") - self._execute_gcode(Keys.CLEARING_SCRIPT) - self._fire_event(CustomEvents.CLEAR_BED) + def _get_interpreter(self): + out = StringIO() + err = StringIO() + interp = Interpreter(writer=out, err_writer=err) + # Merge in so default symbols (e.g. 
exceptions) are retained + for (k, v) in self._symbols.items(): + interp.symtable[k] = v + return interp, out, err + + def run_script_for_event(self, evt, msg=None, msgtype=None): + interp, out, err = self._get_interpreter() + gcode = genEventScript(evt, interp, self._logger) + if len(interp.error) > 0: + for err in interp.error: + self._logger.error(err.get_error()) + self._msg( + f"CPQ {evt.displayName} Preprocessor:\n{err.get_error()}", + type="error", + ) + gcode = "@pause" # Exceptions mean we must wait for the user to act + else: + err.seek(0) + err_output = err.read().strip() + if len(err_output) > 0: + self._logger.error(err_output) + out.seek(0) + interp_output = out.read().strip() + if len(interp_output) > 0: + self._msg(f"CPQ {evt.displayName} Preprocessor:\n{interp_output}") + else: + self._do_msg(evt, running=(gcode != "")) + + # Cancellation happens before custom scripts are run + if evt == CustomEvents.PRINT_CANCEL: + self._printer.cancel_print() + + result = self._execute_gcode(evt, gcode) if gcode != "" else None + + # Bed cooldown turn-off happens after custom scripts are run + if evt == CustomEvents.COOLDOWN: + self._printer.set_temperature("bed", 0) # turn bed off + + self._fire_event(evt) + return result def start_print(self, item): self._msg(f"{item.job.name}: printing {item.path}") @@ -86,8 +159,10 @@ def start_print(self, item): try: self._logger.info(f"Attempting to print {path} (sd={item.sd})") - self._printer.select_file(path, sd=item.sd, printAfterSelect=True) - self._fire_event(CustomEvents.START_PRINT) + self._printer.select_file( + path, sd=item.sd, printAfterSelect=True, user=self._get_user() + ) + self._fire_event(CustomEvents.PRINT_START) except InvalidFileLocation as e: self._logger.error(e) self._msg("File not found: " + path, type="error") diff --git a/continuousprint/script_runner_test.py b/continuousprint/script_runner_test.py index 163b446..af5f4b0 100644 --- a/continuousprint/script_runner_test.py +++ 
b/continuousprint/script_runner_test.py @@ -1,12 +1,15 @@ import unittest +from io import StringIO from octoprint.printer import InvalidFileLocation, InvalidFileType from collections import namedtuple -from unittest.mock import MagicMock +from unittest.mock import MagicMock, ANY, patch from .script_runner import ScriptRunner from .data import CustomEvents +from .storage.database_test import AutomationDBTest +from .storage import queries import logging -logging.basicConfig(level=logging.DEBUG) +# logging.basicConfig(level=logging.DEBUG) LI = namedtuple("LocalItem", ["sd", "path", "job"]) LJ = namedtuple("Job", ["name"]) @@ -14,54 +17,63 @@ class TestScriptRunner(unittest.TestCase): def setUp(self): + super().setUp() self.s = ScriptRunner( msg=MagicMock(), - get_key=MagicMock(), file_manager=MagicMock(), logger=logging.getLogger(), printer=MagicMock(), refresh_ui_state=MagicMock(), fire_event=MagicMock(), ) + self.s._get_user = lambda: "foo" self.s._wrap_stream = MagicMock(return_value=None) + self.s._get_interpreter = lambda: (MagicMock(error=[]), StringIO(), StringIO()) - def test_run_finish_script(self): - self.s.run_finish_script() + @patch("continuousprint.script_runner.genEventScript", return_value="foo") + def test_run_script_for_event(self, ges): + # Note: default scripts are populated on db_init for FINISH and PRINT_SUCCESS + self.s.run_script_for_event(CustomEvents.FINISH) self.s._file_manager.add_file.assert_called() self.s._printer.select_file.assert_called_with( - "ContinuousPrint/cp_queue_finished_script.gcode", + "ContinuousPrint/tmp/continuousprint_finish.gcode", sd=False, printAfterSelect=True, + user="foo", ) self.s._fire_event.assert_called_with(CustomEvents.FINISH) - def test_cancel_print(self): - self.s.cancel_print() + @patch("continuousprint.script_runner.genEventScript", return_value="") + def test_run_script_for_event_cancel(self, ges): + # Script run behavior is already tested in test_run_script_for_event + 
self.s.run_script_for_event(CustomEvents.PRINT_CANCEL) self.s._printer.cancel_print.assert_called() - self.s._fire_event.assert_called_with(CustomEvents.CANCEL) - def test_clear_bed(self): - self.s.clear_bed() - self.s._printer.select_file.assert_called_with( - "ContinuousPrint/cp_bed_clearing_script.gcode", - sd=False, - printAfterSelect=True, - ) - self.s._fire_event.assert_called_with(CustomEvents.CLEAR_BED) + @patch("continuousprint.script_runner.genEventScript", return_value="") + def test_run_script_for_event_cooldown(self, ges): + # Script run behavior is already tested in test_run_script_for_event + self.s.run_script_for_event(CustomEvents.COOLDOWN) + self.s._printer.set_temperature.assert_called_with("bed", 0) def test_start_print_local(self): self.assertEqual(self.s.start_print(LI(False, "a.gcode", LJ("job1"))), True) self.s._printer.select_file.assert_called_with( - "a.gcode", sd=False, printAfterSelect=True + "a.gcode", + sd=False, + printAfterSelect=True, + user="foo", ) - self.s._fire_event.assert_called_with(CustomEvents.START_PRINT) + self.s._fire_event.assert_called_with(CustomEvents.PRINT_START) def test_start_print_sd(self): self.assertEqual(self.s.start_print(LI(True, "a.gcode", LJ("job1"))), True) self.s._printer.select_file.assert_called_with( - "a.gcode", sd=True, printAfterSelect=True + "a.gcode", + sd=True, + printAfterSelect=True, + user="foo", ) - self.s._fire_event.assert_called_with(CustomEvents.START_PRINT) + self.s._fire_event.assert_called_with(CustomEvents.PRINT_START) def test_start_print_lan(self): class NetItem: @@ -74,9 +86,12 @@ def resolve(self): self.assertEqual(self.s.start_print(NetItem()), True) self.s._printer.select_file.assert_called_with( - "net/a.gcode", sd=False, printAfterSelect=True + "net/a.gcode", + sd=False, + printAfterSelect=True, + user="foo", ) - self.s._fire_event.assert_called_with(CustomEvents.START_PRINT) + self.s._fire_event.assert_called_with(CustomEvents.PRINT_START) def 
test_start_print_invalid_location(self): self.s._printer.select_file.side_effect = InvalidFileLocation() @@ -87,3 +102,65 @@ def test_start_print_invalid_filetype(self): self.s._printer.select_file.side_effect = InvalidFileType() self.assertEqual(self.s.start_print(LI(True, "a.gcode", LJ("job1"))), False) self.s._fire_event.assert_not_called() + + +class TestWithInterpreter(AutomationDBTest): + def setUp(self): + super().setUp() + self.s = ScriptRunner( + msg=MagicMock(), + file_manager=MagicMock(), + logger=logging.getLogger(), + printer=MagicMock(), + refresh_ui_state=MagicMock(), + fire_event=MagicMock(), + ) + self.s._get_user = lambda: "foo" + self.s._wrap_stream = MagicMock(return_value=None) + self.s._execute_gcode = MagicMock() + + def test_injection(self): + queries.assignAutomation( + dict(foo="G0 X{direction}"), + dict(bar="{'direction': 5}"), + {CustomEvents.ACTIVATE.event: [dict(script="foo", preprocessor="bar")]}, + ) + self.s.run_script_for_event(CustomEvents.ACTIVATE) + self.s._msg.assert_not_called() + self.s._execute_gcode.assert_called_with(ANY, "G0 X5") + + def test_symbol_carryover(self): + queries.assignAutomation( + dict(s1="G0 X{direction}"), + dict(p1="d=5; {'direction': d}", p2="d += 5; {'direction': d}"), + { + CustomEvents.ACTIVATE.event: [ + dict(script="s1", preprocessor="p1"), + dict(script="s1", preprocessor="p2"), + dict(script="s1", preprocessor="p2"), + ] + }, + ) + self.s.run_script_for_event(CustomEvents.ACTIVATE) + self.s._execute_gcode.assert_called_with(ANY, "G0 X5\nG0 X10\nG0 X15") + + def test_run_script_has_errors(self): + queries.assignAutomation( + dict(foo="G0 X20"), + dict(bar="raise Exception('testing exception')"), + {CustomEvents.ACTIVATE.event: [dict(script="foo", preprocessor="bar")]}, + ) + self.s.run_script_for_event(CustomEvents.ACTIVATE) + self.s._execute_gcode.assert_called_with(ANY, "@pause") + self.assertRegex(self.s._msg.call_args[0][0], "testing exception") + + def test_run_script_has_output(self): + 
queries.assignAutomation( + dict(foo="G0 X20"), + dict(bar="print('test message')\nTrue"), + {CustomEvents.ACTIVATE.event: [dict(script="foo", preprocessor="bar")]}, + ) + self.s.run_script_for_event(CustomEvents.ACTIVATE) + self.s._execute_gcode.assert_called_with(ANY, "G0 X20") + self.s._msg.assert_called_once() + self.assertRegex(self.s._msg.call_args[0][0], "test message") diff --git a/continuousprint/scripts/extract_profile.py b/continuousprint/scripts/extract_profile.py index 71896db..2dfbc2a 100644 --- a/continuousprint/scripts/extract_profile.py +++ b/continuousprint/scripts/extract_profile.py @@ -1,5 +1,6 @@ import re import sys +import os from continuousprint.data import PRINTER_PROFILES @@ -19,20 +20,36 @@ def _strip_nonalpha(s: str): class KiriMotoProcessor: @classmethod - def header_match(self, hdr): + def match(self, hdr, ftr): for line in hdr: if line.startswith("; Generated by Kiri:Moto"): return True return False @classmethod - def get_profile(self, hdr) -> str: + def get_profile(self, hdr, ftr) -> str: for line in hdr: if line.startswith("; Target:"): return re.match("; Target: (.*)", line)[1] return "" +class PrusaSlicerProcessor: + @classmethod + def match(self, hdr, ftr): + for line in hdr: + if line.startswith("; generated by PrusaSlicer"): + return True + return False + + @classmethod + def get_profile(self, hdr, ftr) -> str: + for line in ftr: + if line.startswith("; printer_model = "): + return re.match("; printer_model = (.*)", line)[1] + return "" + + def token_string_match(profstr): # Remove non-alpha characters from profile string # Convert all into bag-of-words @@ -47,39 +64,66 @@ def token_string_match(profstr): sys.stderr.write(f"- {p}: {s}\n") sys.stderr.write("- ...\n") max_score = max(scores) - if max_score < 2: + if max_score < 1: return None max_index = scores.index(max_score) return PROFILES[max_index] PROCESSORS = [ - (cls.__name__, cls.header_match, cls.get_profile) for cls in [KiriMotoProcessor] + (cls.__name__, cls.match, 
cls.get_profile) + for cls in [KiriMotoProcessor, PrusaSlicerProcessor] ] +gcode_move_re = re.compile("^G[012] .*") +gcode_multiline_re = re.compile("\nG[012] .*", re.M) + def get_header(path: str): hdr = [] with open(path) as f: for line in f: - if re.match("^G[012] .*", line): + if line.strip() == "": + continue + if gcode_move_re.match(line): return hdr hdr.append(line) return hdr -def get_profile(hdr: list): - for name, hdr_match, getprof in PROCESSORS: - if hdr_match(hdr): +def get_footer(path: str): + # Adapted from https://stackoverflow.com/a/54278929 + # Skip back until we start seeing gcode + hdr = [] + JUMP = 200 # Num bytes to jump back and search for gcode + with open(path, "rb") as f: + try: # catch OSError in case of a one line file + f.seek(-JUMP, os.SEEK_END) + while not gcode_multiline_re.match(f.read(JUMP).decode("utf8")): + f.seek(-2 * JUMP, os.SEEK_CUR) + f.seek(-JUMP, os.SEEK_END) + for line in f: + ln = line.decode("utf8") + if ln.startswith(";"): + hdr.append(ln) + except OSError: + pass # give up + return hdr + + +def get_profile(hdr: list, ftr: list): + for name, match, getprof in PROCESSORS: + if match(hdr, ftr): sys.stderr.write(f"File matched with {name}\n") - profstr = getprof(hdr) + profstr = getprof(hdr, ftr) return token_string_match(profstr) if __name__ == "__main__": sys.stderr.write("=== Continuous Print Profile Inference ===\n") hdr = get_header(sys.argv[1]) - prof = get_profile(hdr) + ftr = get_footer(sys.argv[1]) + prof = get_profile(hdr, ftr) if prof is not None: sys.stdout.write(prof) sys.stdout.flush() diff --git a/continuousprint/scripts/post_file.py b/continuousprint/scripts/post_file.py index 67d0a25..c1ba6d9 100644 --- a/continuousprint/scripts/post_file.py +++ b/continuousprint/scripts/post_file.py @@ -7,8 +7,6 @@ import threading from pathlib import Path -logging.basicConfig(level=logging.DEBUG) - class Runner: def __init__(self, args): @@ -67,6 +65,7 @@ def cleanup(self): def main(): + 
logging.basicConfig(level=logging.DEBUG) import argparse parser = argparse.ArgumentParser() diff --git a/continuousprint/scripts/test_extract_profile.py b/continuousprint/scripts/test_extract_profile.py index cae03b6..3611233 100644 --- a/continuousprint/scripts/test_extract_profile.py +++ b/continuousprint/scripts/test_extract_profile.py @@ -1,18 +1,57 @@ import unittest -from .extract_profile import get_profile +import tempfile +from .extract_profile import get_profile, get_header, get_footer class TestProfileInference(unittest.TestCase): def testParameterized(self): - for header, want in [ - ("asdf", None), # Random garbage - ("; Generated by Kiri:Moto\n", None), # Header match but no profile - ("; Generated by Kiri:Moto\n; Target: applesauce", None), # Unknown profile - ("; Generated by Kiri:Moto\n; Target: Creality.CR-30", "Creality CR30"), - ("; Generated by Kiri:Moto\n; Target: Mini Prusa", "Prusa Mini"), - ("; Generated by Kiri:Moto\n; Target: Delta V2", "Monoprice Mini Delta V2"), + for header, footer, want in [ + ("asdf", "", None), # Random garbage + ("; Generated by Kiri:Moto\n", "", None), # Header match but no profile + ( + "; Generated by Kiri:Moto\n; Target: orangesauce", + "", + None, + ), # Unknown profile + ("; Generated by Kiri:Moto\n; Target: Creality.CR-30", "", "Creality CR30"), + ("; Generated by Kiri:Moto\n; Target: Mini Prusa", "", "Prusa Mini"), + ( + "; Generated by Kiri:Moto\n; Target: Delta V2", + "", + "Monoprice Mini Delta V2", + ), + ( + "; generated by PrusaSlicer 2.4.2+win64 on 2022-08-22 at 01:21:14 UTC\n", + "; printer_model = MK3S\n", + "Prusa i3 MK3S+", + ), ]: - with self.subTest(header=header, want=want): + with self.subTest(header=header, footer=footer, want=want): hdr = header.split("\n") - result = get_profile(hdr) + ftr = footer.split("\n") + result = get_profile(hdr, ftr) self.assertEqual(result, want) + + +class TestFileParsing(unittest.TestCase): + def testGetHeader(self): + with tempfile.NamedTemporaryFile() as ntf: 
+ with open(ntf.name, "w") as f: + f.write("; Line 1\n; Line 2\n\n\nG0 X5\n; Another line\n") + + self.assertEqual(get_header(ntf.name), ["; Line 1\n", "; Line 2\n"]) + + def testGetFooterSingleLineFile(self): + with tempfile.NamedTemporaryFile() as ntf: + with open(ntf.name, "w") as f: + f.write("; Line1\n") + self.assertEqual(get_footer(ntf.name), []) + + def testGetFooterLongFile(self): + with tempfile.NamedTemporaryFile() as ntf: + with open(ntf.name, "w") as f: + f.write("; Header1\n; Header2\n") + for i in range(1000): + f.write(f"G0 X{i}\n") + f.write("; Line 1\n; Line 2\n") + self.assertEqual(get_footer(ntf.name), ["; Line 1\n", "; Line 2\n"]) diff --git a/continuousprint/static/css/continuousprint.css b/continuousprint/static/css/continuousprint.css index 2f38e72..70e241d 100644 --- a/continuousprint/static/css/continuousprint.css +++ b/continuousprint/static/css/continuousprint.css @@ -91,11 +91,11 @@ #tab_plugin_continuousprint .job.acquired .job-name { font-weight: bold; } -#tab_plugin_continuousprint .fa-grip-vertical { +#tab_plugin_continuousprint .fa-grip-vertical, #settings_plugin_continuousprint .fa-grip-vertical { opacity: 0.0; margin-left: 1px !important; } -#tab_plugin_continuousprint *:hover > .fa-grip-vertical { +#tab_plugin_continuousprint *:hover > .fa-grip-vertical, #settings_plugin_continuousprint *:hover > .fa-grip-vertical { opacity: 0.5; cursor: grab; } @@ -116,7 +116,7 @@ justify-content: center; align-items: center; } -#tab_plugin_continuousprint .accordion-heading, { +#tab_plugin_continuousprint .accordion-heading { width:100%; display: flex; flex-wrap: nowrap; @@ -196,11 +196,12 @@ font-weight: bold; opacity: 0.5; } +#tab_plugin_continuousprint .sets.draft { + border-bottom: 2px #ccc solid; +} #tab_plugin_continuousprint .job-stats { display: flex; justify-content: space-between; - font-weight: bold; - border-top: 2px #ccc solid; } #tab_plugin_continuousprint .has_title { cursor: help; @@ -261,6 +262,25 @@ 
#settings_plugin_continuousprint .cpq_title > img { width: 71px; } +#settings_plugin_continuousprint .header-row { + width:100%; + display: flex; + flex-wrap: nowrap; + flex-direction: row; + align-items: center; + min-height: 30px; +} +#settings_plugin_continuousprint .subheader { + opacity: 0.7; + font-style: italic; +} +#settings_plugin_continuousprint .header-row > * { + margin-left: var(--cpq-pad); +} +#settings_plugin_continuousprint .events > h4 { + margin-top: var(--cpq-pad2); + padding-top: var(--cpq-pad2); +} #tab_plugin_continuousprint .queue-header, #settings_continuousprint_queues .queue-header { display: flex; justify-content: space-between; diff --git a/continuousprint/static/js/continuousprint_api.js b/continuousprint/static/js/continuousprint_api.js index 1346fde..79d12a0 100644 --- a/continuousprint/static/js/continuousprint_api.js +++ b/continuousprint/static/js/continuousprint_api.js @@ -7,6 +7,7 @@ class CPAPI { STATE = "state" QUEUES = "queues" HISTORY = "history" + AUTOMATION = "automation" init(loading_vm, err_cb) { this.loading = loading_vm; @@ -17,6 +18,7 @@ class CPAPI { let self = this; if (blocking) { if (self.loading()) { + console.log(`Skipping blocking call to ${url}; another call in progress`); return; } self.loading(true); @@ -52,8 +54,8 @@ class CPAPI { } get(type, cb, err_cb=undefined) { - // History fetching doesn't mess with mutability - let blocking = (type !== this.HISTORY); + // History/scripts fetching doesn't mess with mutability + let blocking = (type !== this.HISTORY && type !== this.AUTOMATION); this._call(type, 'get', undefined, cb, err_cb, blocking); } diff --git a/continuousprint/static/js/continuousprint_job.js b/continuousprint/static/js/continuousprint_job.js index 207acab..9880e0d 100644 --- a/continuousprint/static/js/continuousprint_job.js +++ b/continuousprint/static/js/continuousprint_job.js @@ -14,7 +14,7 @@ if (typeof CPSet === "undefined" || CPSet === null) { // jobs and sets are derived from self.queue, 
but they must be // observableArrays in order for Sortable to be able to reorder it. -function CPJob(obj, peers, api, profile) { +function CPJob(obj, peers, api, profile, materials) { if (api === undefined) { throw Error("API must be provided when creating CPJob"); } @@ -121,16 +121,144 @@ function CPJob(obj, peers, api, profile) { return v; } + + self.humanize = function(num, unit="") { + // Humanizes numbers by condensing and adding units + let v = ''; + if (num < 1000) { + v = (num % 1 === 0) ? num : num.toFixed(1); + } else if (num < 100000) { + let k = (num/1000); + v = ((k % 1 === 0) ? k : k.toFixed(1)) + 'k'; + } + return v + unit; + }; + + self.humanTime = function(s) { + // Humanizes time values; parameter is seconds + if (s < 60) { + return Math.round(s) + 's'; + } else if (s < 3600) { + return Math.round(s/60) + 'm'; + } else if (s < 86400) { + let h = s/3600; + return ((h % 1 === 0) ? h : h.toFixed(1)) + 'h'; + } else { + let d = s/86400; + return ((d % 1 === 0) ? d : d.toFixed(1)) + 'd'; + } + }; + + self.getMaterialLinearMasses = ko.computed(function() { + let result = []; + for (let m of materials()) { + // Convert density from g/cm^3 to g/mm^3, then multiply by + // filament cross-sectional area (mm^2) to get grams per linear mm + result.push( + (m.density / 1000) * + ((m.diameter / 2)*(m.diameter / 2)*Math.PI) + ); + } + return result; + }); + + self.raw_stats = ko.computed(function() { + let result = {completed: 0, remaining: 0, count: 0}; + for (let qs of self.sets()) { + if (!qs.profile_matches()) { + continue; + } + result.remaining += self._safeParse(qs.remaining()); + result.count += self._safeParse(qs.count()); + result.completed += self._safeParse(qs.completed()); + } + return result; + }); + self.totals = ko.computed(function() { - let r = {count: 0, completed: 0, remaining: 0, total: 0}; + let r = [ + {legend: 'Total items', title: null}, + {legend: 'Total time', title: "Uses Octoprint's file analysis estimate; may be inaccurate"}, + 
{legend: 'Total mass', title: "Mass is calculated using active spool(s) in SpoolManager"}, + ]; + + let linmasses = self.getMaterialLinearMasses(); + + for (let t of r) { + t.count = 0; + t.completed = 0; + t.remaining = 0; + t.total = 0; + t.error = 0; + } + for (let qs of self.sets()) { - r.remaining += self._safeParse(qs.remaining()); - r.total += self._safeParse(qs.length_remaining()); - r.count += self._safeParse(qs.count()); - r.completed += self._safeParse(qs.completed()); + if (!qs.profile_matches()) { + continue; + } + + let rem = self._safeParse(qs.remaining()) + let tot = self._safeParse(qs.length_remaining()); + let count = self._safeParse(qs.count()); + let cplt = self._safeParse(qs.completed()); + + let meta = qs.metadata; + let ept = meta && meta.estimatedPrintTime + let len = meta && meta.filamentLengths; + + // Update print count totals + r[0].remaining += rem; + r[0].total += tot; + r[0].count += count; + r[0].completed += cplt; + + if (ept === null || ept === undefined) { + r[1].error += 1; + } else { + r[1].remaining += rem * ept; + r[1].total += tot * ept + r[1].count += count * ept; + r[1].completed += cplt * ept; + } + + if (len === null || len === undefined || len.length === 0) { + r[2].error += 1; + } else { + let mass = 0; + for (let i = 0; i < len.length; i++) { + mass += linmasses[i] * len[i]; + } + + if (!isNaN(mass)) { + r[2].remaining += rem * mass; + r[2].total += tot * mass; + r[2].count += count * mass; + r[2].completed += cplt * mass; + } else { + r[2].error += 1; + } + } + + } + // Assign error texts + r[0].error = ''; + r[1].error = (r[1].error > 0) ? `${r[1].error} sets missing time estimates` : ''; + r[2].error = (r[2].error > 0) ? 
`${r[1].error} errors calculating mass` : ''; + + for (let k of ['remaining', 'total', 'count', 'completed']) { + r[0][k] = self.humanize(r[0][k]); + r[1][k] = self.humanTime(r[1][k]); + r[2][k] = self.humanize(r[2][k], 'g'); } + + // Hide mass details if linmasses is empty (implies SpoolManager not set up) + if (linmasses.length === 0) { + r.splice(2,1); + } + return r; }); + self.checkFraction = ko.computed(function() { return (self.selected()) ? 1 : 0; }); diff --git a/continuousprint/static/js/continuousprint_job.test.js b/continuousprint/static/js/continuousprint_job.test.js index 25d5039..4bface3 100644 --- a/continuousprint/static/js/continuousprint_job.test.js +++ b/continuousprint/static/js/continuousprint_job.test.js @@ -6,6 +6,8 @@ const DATA = { materials: [], count: 2, remaining: 1, + completed: 1, + metadata: JSON.stringify({estimatedPrintTime: 100, filamentLengths: [100]}), }; function sets(nsets = 2) { @@ -16,58 +18,91 @@ function sets(nsets = 2) { return sets; } +function prof() { + return null; +} + function api() { return { edit: jest.fn((_, obj, cb) => cb(obj)), } } +function mats() { + return ko.observableArray([{density: 0.4, diameter: 1.75}]); +} + test('basic observables', () => { - let j = new Job({name: 'bob', sets: sets()}, [], api()); + let j = new Job({name: 'bob', sets: sets()}, [], api(), prof(), mats()); expect(j._name()).toBe('bob'); expect(j.sets().length).not.toBe(0); }); test('onSetModified new', () => { - let j = new Job({sets: sets()}, [], api()); + let j = new Job({sets: sets()}, [], api(), prof(), mats()); j.onSetModified({...DATA, id: 5, path: "asdf"}); expect(j.sets().length).toBe(3); // Added onto the end }); test('onSetModified existing', () => { - let j = new Job({sets: sets()}, [], api()); + let j = new Job({sets: sets()}, [], api(), prof(), mats()); j.onSetModified({...DATA, id: 1, path: "asdf"}); expect(j.sets().length).toBe(2); // Replaced expect(j.sets()[1].path()).toBe('asdf'); }); test('totals', () => { - let j = 
new Job({count: 3, completed: 1, remaining: 1, sets: sets()}, [], api()); - // 2 jobs done, each with 2 sets of 2 --> 8 - // plus an extra 1 each in current run --> 10 - expect(j.totals()).toStrictEqual({ - completed: 0, - count: 4, - remaining: 2, - total: 2, + let j = new Job({count: 3, completed: 2, remaining: 1, sets: sets()}, [], api(), prof(), mats()); + + let t = j.totals(); + expect(t[0]).toStrictEqual({ + completed: "2", // sets have 1/2 completed this run + count: "4", // 2 sets each with count=2 + remaining: "2", // 2 left in this run, one from each set + total: "2", // 2 pending + error: "", + legend: "Total items", + title: null, + }); + + // Values are as above, but x100 and converted to minutes + expect(t[1]).toStrictEqual({ + completed: "3m", + count: "7m", + remaining: "3m", + total: "3m", + error: "", + legend: "Total time", + title: expect.anything(), + }); + + // Values are as above, but factored by filamentLength, density, and filament diameter + expect(t[2]).toStrictEqual({ + completed: "0.2g", + count: "0.4g", + remaining: "0.2g", + total: "0.2g", + error: "", + legend: "Total mass", + title: expect.anything(), }); }); test('checkFraction', () => { - let j = new Job({sets: sets()}, [], api()); + let j = new Job({sets: sets()}, [], api(), prof(), mats()); expect(j.checkFraction()).toBe(0); j.selected(true); expect(j.checkFraction()).not.toBe(0); }); test('pct_complete', () => { - let j = new Job({count: 5, remaining: 3, sets: sets()}, [], api()); + let j = new Job({count: 5, remaining: 3, sets: sets()}, [], api(), prof(), mats()); expect(j.pct_complete()).toBe('40%'); }); test('editStart', () =>{ let a = api(); - let j = new Job({}, [], a); + let j = new Job({}, [], a, prof(), mats()); j.editStart(); expect(a.edit).toHaveBeenCalled(); expect(j.draft()).toBe(true); @@ -75,7 +110,7 @@ test('editStart', () =>{ test('editEnd', () => { let a = api(); - let j = new Job({draft: true, name: 'bob', count: 2}, [], a); + let j = new Job({draft: true, 
name: 'bob', count: 2}, [], a, prof(), mats()); j.editEnd(); let call = a.edit.mock.calls[0][1]; expect(call.name).toEqual('bob'); diff --git a/continuousprint/static/js/continuousprint_queue.js b/continuousprint/static/js/continuousprint_queue.js index 1b94631..b194cf6 100644 --- a/continuousprint/static/js/continuousprint_queue.js +++ b/continuousprint/static/js/continuousprint_queue.js @@ -14,7 +14,7 @@ if (typeof CPJob === "undefined" || CPJob === null) { }; } -function CPQueue(data, api, files, profile) { +function CPQueue(data, api, files, profile, materials) { var self = this; self.api = api; self.files = files; @@ -23,7 +23,7 @@ function CPQueue(data, api, files, profile) { self.addr = data.addr; self.jobs = ko.observableArray([]); self._pushJob = function(jdata) { - self.jobs.push(new CPJob(jdata, data.peers, self.api, profile)); + self.jobs.push(new CPJob(jdata, data.peers, self.api, profile, materials)); }; for (let j of data.jobs) { self._pushJob(j); @@ -95,18 +95,18 @@ function CPQueue(data, api, files, profile) { break; case "Unstarted Jobs": for (let j of self.jobs()) { - j.onChecked(j.sets().length !== 0 && j.totals().completed === 0); + j.onChecked(j.sets().length !== 0 && j.raw_stats().completed === 0); } break; case "Incomplete Jobs": for (let j of self.jobs()) { - let t = j.totals(); + let t = j.raw_stats(); j.onChecked(t.remaining > 0 && t.remaining < t.count); } break; case "Completed Jobs": for (let j of self.jobs()) { - j.onChecked(j.sets().length !== 0 && j.totals().remaining == 0); + j.onChecked(j.sets().length !== 0 && j.raw_stats().remaining == 0); } break; default: @@ -262,6 +262,21 @@ function CPQueue(data, api, files, profile) { return false; } + self._extractMetadata = function(path) { + let meta = {estimatedPrintTime: null, filamentLengths: []}; + + let f = self.files.elementByPath(path); + if (f !== null && f !== undefined) { + meta.estimatedPrintTime = (f.gcodeAnalysis || {}).estimatedPrintTime; + + let fila = (f.gcodeAnalysis || 
{}).filament || {}; + for (let tool of Object.values(fila)) { + meta.filamentLengths.push(tool.length); + } + } + return JSON.stringify(meta); + } + self.addFile = function(data, infer_profile=false) { if (data.path.endsWith('.gjob')) { // .gjob import has a different API path @@ -282,6 +297,7 @@ function CPQueue(data, api, files, profile) { name: data.name, path: data.path, sd: (data.origin !== "local"), + metadata: self._extractMetadata(data.path), count: 1, }; diff --git a/continuousprint/static/js/continuousprint_queue.test.js b/continuousprint/static/js/continuousprint_queue.test.js index a3ec702..6f527c0 100644 --- a/continuousprint/static/js/continuousprint_queue.test.js +++ b/continuousprint/static/js/continuousprint_queue.test.js @@ -1,6 +1,6 @@ const VM = require('./continuousprint_queue'); -function mocks(filename="test.gcode") { +function mockapi(filename="test.gcode") { return { add: jest.fn(), rm: jest.fn(), @@ -9,6 +9,21 @@ function mocks(filename="test.gcode") { }; } +function mockfiles() { + return { + requestData: jest.fn(), + elementByPath: jest.fn(), + }; +} + +function mockprofile() { + return []; +} + +function mockmaterials() { + return ko.observable([]); +} + const DATA = { name: `item`, path: `item.gcode`, @@ -35,7 +50,7 @@ function items(njobs = 1, nsets = 2) { function init(njobs = 1) { return new VM({name:"test", jobs:items(njobs), peers:[ {name: "localhost", profile: {name: "profile"}, status: "IDLE"} - ]}, mocks()); + ]}, mockapi(), mockfiles(), mockprofile(), mockmaterials()); } test('newEmptyJob', () => { @@ -51,7 +66,7 @@ test('setCount allows only positive integers', () => { let v = init(); v.setCount(vm, {target: {value: "-5"}}); v.setCount(vm, {target: {value: "0"}}); - v.setCount(vm, {target: {value: "apple"}}); + v.setCount(vm, {target: {value: "orange"}}); expect(vm.set_count).not.toHaveBeenCalled(); v.setCount(vm, {target: {value: "5"}}); @@ -131,6 +146,7 @@ test('resetSelected', () => { test('addFile (profile inference 
disabled)', () => { let v = init(njobs=0); + v.files.elementByPath = (p) => { return {gcodeAnalysis: {estimatedPrintTime: 123, filament: [{length: 456}]}}}; v.addFile({name: "foo", path: "foo.gcode", origin: "local", continuousprint: {profile: "testprof"}}); expect(v.api.add).toHaveBeenCalledWith(v.api.SET, { "count": 1, @@ -139,6 +155,7 @@ test('addFile (profile inference disabled)', () => { "name": "foo", "path": "foo.gcode", "sd": false, + "metadata": "{\"estimatedPrintTime\":123,\"filamentLengths\":[456]}", }, expect.any(Function)); }); @@ -153,5 +170,6 @@ test('addFile (profile inference enabled)', () => { "path": "foo.gcode", "sd": false, "profiles": ["testprof"], + "metadata": "{\"estimatedPrintTime\":null,\"filamentLengths\":[]}", }, expect.any(Function)); }); diff --git a/continuousprint/static/js/continuousprint_set.js b/continuousprint/static/js/continuousprint_set.js index 51e230b..bcf810c 100644 --- a/continuousprint/static/js/continuousprint_set.js +++ b/continuousprint/static/js/continuousprint_set.js @@ -27,6 +27,7 @@ function CPSet(data, job, api, profile) { self.expanded = ko.observable(data.expanded); self.mats = ko.observable(data.materials || []); self.profiles = ko.observableArray(data.profiles || []); + self.metadata = (data.metadata) ? 
JSON.parse(data.metadata) : null; self.profile_matches = ko.computed(function() { let profs = self.profiles(); if (profs.length === 0) { @@ -55,6 +56,7 @@ function CPSet(data, job, api, profile) { remaining: self.remaining(), materials: self.mats(), profiles: self.profiles(), + metadata: data.metadata, }; } self.length_remaining = ko.computed(function() { diff --git a/continuousprint/static/js/continuousprint_set.test.js b/continuousprint/static/js/continuousprint_set.test.js index 9c3ced6..ac7df1f 100644 --- a/continuousprint/static/js/continuousprint_set.test.js +++ b/continuousprint/static/js/continuousprint_set.test.js @@ -15,7 +15,6 @@ function data(count=3) { function api() { return { update: (_, data, cb) => { - console.log(data.material); cb({...data, id: 1, remaining: (data.count || 1), job_remaining: 2, materials: ((data.materials) ? data.materials.split(',') : [])}) }, }; diff --git a/continuousprint/static/js/continuousprint_settings.js b/continuousprint/static/js/continuousprint_settings.js index e6f3c3d..0bda78b 100644 --- a/continuousprint/static/js/continuousprint_settings.js +++ b/continuousprint/static/js/continuousprint_settings.js @@ -6,11 +6,12 @@ if (typeof log === "undefined" || log === null) { }; CP_PRINTER_PROFILES = []; CP_GCODE_SCRIPTS = []; + CP_CUSTOM_EVENTS = []; CP_LOCAL_IP = ''; CPAPI = require('./continuousprint_api'); } -function CPSettingsViewModel(parameters, profiles=CP_PRINTER_PROFILES, scripts=CP_GCODE_SCRIPTS) { +function CPSettingsViewModel(parameters, profiles=CP_PRINTER_PROFILES, default_scripts=CP_GCODE_SCRIPTS, custom_events=CP_CUSTOM_EVENTS) { var self = this; self.PLUGIN_ID = "octoprint.plugins.continuousprint"; self.log = log.getLogger(self.PLUGIN_ID); @@ -38,32 +39,26 @@ function CPSettingsViewModel(parameters, profiles=CP_PRINTER_PROFILES, scripts=C } self.profiles[prof.make][prof.model] = prof; } - self.scripts = {}; - for (let s of scripts) { - self.scripts[s.name] = s.gcode; + self.default_scripts = {}; + for 
(let s of default_scripts) { + self.default_scripts[s.name] = s.gcode; } // Patch the settings viewmodel to allow for us to block saving when validation has failed. // As of 2022-05-31, 'exchanging()' is only used for display and not for logic. self.settings.exchanging_orig = self.settings.exchanging; self.settings.exchanging = ko.pureComputed(function () { - return self.settings.exchanging_orig() || !self.allValidQueueNames() || !self.allValidQueueAddr(); + return self.settings.exchanging_orig() || + !self.allValidQueueNames() || !self.allValidQueueAddr() || + !self.allUniqueScriptNames() || !self.allUniquePreprocessorNames(); }); - // Queues are stored in the DB; we must fetch them. self.queues = ko.observableArray(); self.queue_fingerprint = null; - self.api.get(self.api.QUEUES, (result) => { - let queues = [] - for (let r of result) { - if (r.name === "archive") { - continue; // Archive is hidden - } - queues.push(r); - } - self.queues(queues); - self.queue_fingerprint = JSON.stringify(queues); - }); + self.scripts = ko.observableArray([]); + self.preprocessors = ko.observableArray([]); + self.events = ko.observableArray([]); + self.scripts_fingerprint = null; self.selected_make = ko.observable(); self.selected_model = ko.observable(); @@ -85,11 +80,190 @@ function CPSettingsViewModel(parameters, profiles=CP_PRINTER_PROFILES, scripts=C return; } let cpset = self.settings.settings.plugins.continuousprint; - cpset.cp_bed_clearing_script(self.scripts[profile.defaults.clearBed]); - cpset.cp_queue_finished_script(self.scripts[profile.defaults.finished]); cpset.cp_printer_profile(profile.name); }; + self.preprocessorSelectOptions = ko.computed(function() { + let result = [{name: '', value: null}, {name: 'Add new...', value: 'ADDNEW'}]; + for (let p of self.preprocessors()) { + result.push({name: p.name(), value: p}); + } + return result; + }); + + function mkScript(name, body, expanded) { + let b = ko.observable(body || ""); + let n = ko.observable(name || ""); + 
return { + name: n, + body: b, + expanded: ko.observable((expanded === undefined) ? true : expanded), + preview: ko.computed(function() { + let flat = b().replace('\n', ' '); + return (flat.length > 32) ? flat.slice(0, 29) + "..." : flat; + }), + registrations: ko.computed(function() { + let nn = n(); + let result = []; + for (let e of self.events()) { + for (let a of e.actions()) { + let ppname = a.preprocessor(); + if (ppname !== null && ppname.name) { + ppname = ppname.name(); + } + if (a.script.name() === nn || ppname === nn) { + result.push(e.display); + } + } + } + return result; + }), + }; + } + + self.loadScriptsFromProfile = function() { + let profile = (self.profiles[self.selected_make()] || {})[self.selected_model()]; + if (profile === undefined) { + return; + } + self.addScript(`Clear Bed (${profile.name})`, + self.default_scripts[profile.defaults.clearBed], true); + self.addScript(`Finish (${profile.name})`, + self.default_scripts[profile.defaults.finished], true); + } + + self.loadFromFile = function(file, cb) { + // Inspired by https://stackoverflow.com/a/14155586 + if(!window.FileReader) return; + var reader = new FileReader(); + reader.onload = function(evt) { + if(evt.target.readyState != 2) return; + if(evt.target.error) { + alert('Error while reading file'); + return; + } + cb(file.name, evt.target.result, false); + }; + reader.readAsText(file); + }; + self.loadScriptFromFile = (f) => self.loadFromFile(f, self.addScript); + self.loadPreprocessorFromFile = (f) => self.loadFromFile(f, self.addPreprocessor); + + self.downloadFile = function(filename, body) { + // https://stackoverflow.com/a/45831357 + var blob = new Blob([body], {type: 'text/plain'}); + if (window.navigator && window.navigator.msSaveOrOpenBlob) { + window.navigator.msSaveOrOpenBlob(blob, filename); + } else { + var e = document.createEvent('MouseEvents'), + a = document.createElement('a'); + a.download = filename; + a.href = window.URL.createObjectURL(blob); + a.dataset.downloadurl 
= ['text/plain', a.download, a.href].join(':'); + e.initEvent('click', true, false, window, 0, 0, 0, 0, 0, false, false, false, false, 0, null); + a.dispatchEvent(e); + } + } + self.downloadScript = function(s) { + let n = s.name() + if (!n.endsWith(".gcode")) { + n += ".gcode"; + } + self.downloadFile(n, s.body()); + }; + + self.downloadPreprocessor = function(p) { + let n = p.name() + if (!n.endsWith(".py")) { + n += ".py"; + } + self.downloadFile(n, p.body()); + }; + + self.actionPreprocessorChanged = function(vm) { + if (vm.preprocessor() === "ADDNEW") { + p = self.addPreprocessor("", "", true); + vm.preprocessor(p); + self.gotoTab("scripts"); + } + }; + + self.addScript = function(name, body, expanded) { + let s = mkScript(name, body, expanded); + self.scripts.push(s); + return s; + }; + + self.addPreprocessor = function(name, body, expanded) { + let p = mkScript(name, body, expanded); + self.preprocessors.push(p); + return p; + }; + + self.rmScript = function(s) { + for (let e of self.events()) { + for (let a of e.actions()) { + if (a.script == s) { + e.actions.remove(a); + } + } + } + self.scripts.remove(s); + } + self.rmPreprocessor = function(p) { + for (let e of self.events()) { + for (let a of e.actions()) { + if (a.preprocessor() == p) { + a.preprocessor(null); + } + } + } + self.preprocessors.remove(p); + } + self.gotoScript = function(s) { + s.expanded(true); + self.gotoTab("scripts"); + } + self.gotoTab = function(suffix) { + $(`#settings_continuousprint_tabs a[href="#settings_continuousprint_${suffix}"]`).tab('show'); + } + + self.addAction = function(e, s) { + if (s === null) { + s = self.addScript(); + self.gotoScript(s); + } + e.actions.push({ + script: s, + preprocessor: ko.observable(null), + }); + }; + self.rmAction = function(e, a) { + e.actions.remove(a); + } + self.allUniqueScriptNames = ko.computed(function() { + let names = new Set(); + for (let s of self.scripts()) { + let n = s.name(); + if (names.has(n)) { + return false; + } + 
names.add(n); + } + return true; + }); + self.allUniquePreprocessorNames = ko.computed(function() { + let names = new Set(); + for (let p of self.preprocessors()) { + let n = p.name(); + if (names.has(n)) { + return false; + } + names.add(n); + } + return true; + }); + self.newBlankQueue = function() { self.queues.push({name: "", addr: "", strategy: ""}); }; @@ -130,22 +304,96 @@ function CPSettingsViewModel(parameters, profiles=CP_PRINTER_PROFILES, scripts=C if (self.settings.settings.plugins.continuousprint.cp_printer_profile() === prof.name) { self.selected_make(prof.make); self.selected_model(prof.model); - return; + break; } } + // Queues and scripts are stored in the DB; we must fetch them whenever + // the settings page is loaded + self.api.get(self.api.QUEUES, (result) => { + let queues = [] + for (let r of result) { + if (r.name === "archive") { + continue; // Archive is hidden + } + queues.push(r); + } + self.queues(queues); + self.queue_fingerprint = JSON.stringify(queues); + }); + + self.api.get(self.api.AUTOMATION, (result) => { + let scripts = {}; + for (let k of Object.keys(result.scripts)) { + scripts[k] = mkScript(k, result.scripts[k], false); + } + self.scripts(Object.values(scripts)); + + let preprocessors = {}; + for (let k of Object.keys(result.preprocessors)) { + preprocessors[k] = mkScript(k, result.preprocessors[k], false); + } + self.preprocessors(Object.values(preprocessors)); + + let events = [] + for (let k of custom_events) { + let actions = []; + for (let a of result.events[k.event] || []) { + actions.push({ + script: scripts[a.script], + preprocessor: ko.observable(preprocessors[a.preprocessor]), + }); + } + events.push({ + ...k, + actions: ko.observableArray(actions), + }); + } + events.sort((a, b) => a.display < b.display); + self.events(events); + self.scripts_fingerprint = JSON.stringify(result); + }); }; // Called automatically by SettingsViewModel self.onSettingsBeforeSave = function() { - let queues = self.queues() - if 
(JSON.stringify(queues) === self.queue_fingerprint) { - return; // Don't call out to API if we haven't changed anything - } - // Sadly it appears flask doesn't have good parsing of nested POST structures, - // So we pass it a JSON string instead. - self.api.edit(self.api.QUEUES, queues, () => { - // Editing queues causes a UI refresh to the main viewmodel; no work is needed here - }); + let queues = self.queues(); + if (JSON.stringify(queues) !== self.queue_fingerprint) { + // Sadly it appears flask doesn't have good parsing of nested POST structures, + // So we pass it a JSON string instead. + self.api.edit(self.api.QUEUES, queues, () => { + // Editing queues causes a UI refresh to the main viewmodel; no work is needed here + }); + } + + let scripts = {} + for (let s of self.scripts()) { + scripts[s.name()] = s.body(); + } + let preprocessors = {} + for (let p of self.preprocessors()) { + preprocessors[p.name()] = p.body(); + } + let events = {}; + for (let e of self.events()) { + let ks = []; + for (let a of e.actions()) { + let pp = a.preprocessor() + if (pp !== null) { + pp = pp.name(); + } + ks.push({ + script: a.script.name(), + preprocessor: pp, + }); + } + if (ks.length !== 0) { + events[e.event] = ks; + } + } + let data = {scripts, preprocessors, events}; + if (JSON.stringify(data) !== self.scripts_fingerprint) { + self.api.edit(self.api.AUTOMATION, data, () => {}); + } } self.sortStart = function() { diff --git a/continuousprint/static/js/continuousprint_settings.test.js b/continuousprint/static/js/continuousprint_settings.test.js index 9715e8c..69a62f6 100644 --- a/continuousprint/static/js/continuousprint_settings.test.js +++ b/continuousprint/static/js/continuousprint_settings.test.js @@ -33,6 +33,9 @@ const SCRIPTS = [ }, ]; +const EVENTS = [ + {event: 'e1'}, +]; function mocks() { return [ @@ -53,6 +56,8 @@ function mocks() { onServerConnect: jest.fn(), }, { + AUTOMATION: 'automation', + QUEUES: 'queues', init: jest.fn(), get: jest.fn((_, cb) => 
cb([])), edit: jest.fn(), @@ -61,72 +66,226 @@ function mocks() { } test('makes are populated', () => { - let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS); + let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS, EVENTS); expect(v.printer_makes().length).toBeGreaterThan(1); // Not just "Select one" }); test('models are populated based on selected_make', () => { - let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS); + let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS, EVENTS); v.selected_make("Test"); expect(v.printer_models()).toEqual(["-", "Printer"]); }); -test('valid model change updates settings scripts', () => { - let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS); +test('valid model change updates profile in settings', () => { + let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS, EVENTS); v.selected_make("Test"); v.selected_model("Printer"); v.modelChanged(); - expect(v.settings.settings.plugins.continuousprint.cp_bed_clearing_script).toHaveBeenCalledWith("test1"); - expect(v.settings.settings.plugins.continuousprint.cp_queue_finished_script).toHaveBeenCalledWith("test2"); + expect(v.settings.settings.plugins.continuousprint.cp_printer_profile).toHaveBeenCalledWith("TestPrinter"); +}); + +test('loadScriptsFromProfile', () => { + let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS, EVENTS); + v.selected_make("Test"); + v.selected_model("Printer"); + v.loadScriptsFromProfile(); + expect(v.scripts()[0].name()).toMatch(/^Clear Bed.*/); + expect(v.scripts()[1].name()).toMatch(/^Finish.*/); }); test('"auto" address allows submit', () =>{ - let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS); + let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS, EVENTS); v.queues.push({name: 'asdf', addr: 'auto'}); v.onSettingsBeforeSave(); expect(v.settings.exchanging()).toEqual(false); }); test('invalid address blocks submit', () =>{ - let v = new VM.CPSettingsViewModel(mocks(), 
PROFILES, SCRIPTS); + let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS, EVENTS); v.queues.push({name: 'asdf', addr: 'something_invalid'}); v.onSettingsBeforeSave(); expect(v.settings.exchanging()).toEqual(true); }); test('valid address allows submit', () =>{ - let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS); + let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS, EVENTS); v.queues.push({name: 'asdf', addr: '192.168.1.69:13337'}); v.onSettingsBeforeSave(); expect(v.settings.exchanging()).toEqual(false); }); test('invalid model change is ignored', () => { - let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS); + let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS, EVENTS); v.modelChanged(); expect(v.settings.settings.plugins.continuousprint.cp_bed_clearing_script).not.toHaveBeenCalled(); expect(v.settings.settings.plugins.continuousprint.cp_queue_finished_script).not.toHaveBeenCalled(); }); -test('load queues', () => { +test('load queues and scripts on settings view shown', () => { m = mocks(); - m[2].get = (_, cb) => cb([ - {name: "archive"}, - {name: "local", addr: "", strategy:"IN_ORDER"}, - {name: "LAN", addr: "a:1", strategy:"IN_ORDER"}, - ]); - let v = new VM.CPSettingsViewModel(m, PROFILES, SCRIPTS); + m[2].get = function (typ, cb) { + if (typ === m[2].QUEUES) { + cb([ + {name: "archive"}, + {name: "local", addr: "", strategy:"IN_ORDER"}, + {name: "LAN", addr: "a:1", strategy:"IN_ORDER"}, + ]); + } else if (typ === m[2].AUTOMATION) { + cb({ + scripts: {a: 'g1', b: 'g2'}, + preprocessors: {c: 'p1'}, + events: {e1: [{script: 'a', preprocessor: 'c'}]}, + }); + } + }; + let v = new VM.CPSettingsViewModel(m, PROFILES, SCRIPTS, EVENTS); + v.onSettingsShown(); expect(v.queues().length).toBe(2); // Archive excluded }); test('dirty exit commits queues', () => { - let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS); + let m = mocks(); + m[2].get = function (typ, cb) { + if (typ === m[2].QUEUES) { + 
cb([]); + } else if (typ === m[2].AUTOMATION) { + cb({ + scripts: {}, + preprocessors: {}, + events: {}, + }); + } + }; + let v = new VM.CPSettingsViewModel(m, PROFILES, SCRIPTS, EVENTS); + v.onSettingsShown(); v.queues.push({name: 'asdf', addr: ''}); v.onSettingsBeforeSave(); - expect(v.api.edit).toHaveBeenCalled(); + expect(v.api.edit).toHaveBeenCalledWith(m[2].QUEUES, expect.anything(), expect.anything()); }); test('non-dirty exit does not call commitQueues', () => { - let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS); + let m = mocks(); + m[2].get = function (typ, cb) { + if (typ === m[2].QUEUES) { + cb([]); + } else if (typ === m[2].AUTOMATION) { + cb({ + scripts: {}, + preprocessors: {}, + events: {}, + }); + } + }; + let v = new VM.CPSettingsViewModel(m, PROFILES, SCRIPTS, EVENTS); + v.onSettingsShown(); v.onSettingsBeforeSave(); expect(v.api.edit).not.toHaveBeenCalled(); +}); + +test('addPreprocessor, rmPreprocessor', () => { + let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS, EVENTS); + let p = v.addPreprocessor(); + expect(v.preprocessors().length).toEqual(1); + + // rmPreprocessor also removes from any events, without deleting the action + v.events([{ + actions: ko.observableArray([ + {script: {name: ko.observable('testscript')}, preprocessor: ko.observable(p)}, + ]) + }]); + v.rmPreprocessor(p); + expect(v.preprocessors().length).toEqual(0); + expect(v.events()[0].actions()[0].preprocessor()).toEqual(null); + +}); + +test('addScript, rmScript', () => { + let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS, EVENTS); + v.addScript(); + expect(v.scripts().length).toEqual(1); + + // rmScript also removes the script from any events + v.events([{ + actions: ko.observableArray([ + {script: v.scripts()[0], preprocessor: ko.observable(null)}, + ]) + }]); + v.rmScript(v.scripts()[0]); + expect(v.scripts().length).toEqual(0); + expect(v.events()[0].actions().length).toEqual(0); +}); + +test('addAction, rmAction', () => { + let v 
= new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS, EVENTS); + let e = {"actions": ko.observableArray([])}; + let a = {script:"foo"}; + v.addAction(e, a); + expect(e.actions()[0].script).toEqual(a); + v.rmAction(e, e.actions()[0]); + expect(e.actions().length).toEqual(0); +}); + +test('script or preprocessor naming collision blocks submit', () =>{ + let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS, EVENTS); + v.addScript(); + v.addScript(); + expect(v.settings.exchanging()).toEqual(true); +}); + +test('registrations of script / preprocessor are tracked', () => { + let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS, EVENTS); + let s = v.addScript(); + expect(s.registrations()).toEqual([]); + v.events([{ + display: "testevent", + actions: ko.observableArray([ + {script: s, preprocessor: ko.observable(null)}, + ]) + }]); + expect(s.registrations()).toEqual(["testevent"]); +}); +test('loadScriptFromFile, loadPreprocessorFromFile', () => { + let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS, EVENTS); + v.loadFromFile = (file, cb) => cb("name", "result", false); + // No file argument needed since using fake loadFromFile + v.loadScriptFromFile(); + let s = v.scripts()[0]; + expect(s.body()).toEqual("result"); + expect(s.name()).toEqual("name"); + v.loadPreprocessorFromFile(); + let p = v.preprocessors()[0]; + expect(p.body()).toEqual("result"); + expect(p.name()).toEqual("name"); +}); +test('downloadScript, downloadPreprocessor', () => { + let v = new VM.CPSettingsViewModel(mocks(), PROFILES, SCRIPTS, EVENTS); + v.downloadFile = jest.fn() + + v.downloadScript({name: ko.observable('foo'), body: ko.observable('bar')}); + expect(v.downloadFile).toHaveBeenCalledWith("foo.gcode", "bar"); + + v.downloadPreprocessor({name: ko.observable('foo'), body: ko.observable('bar')}); + expect(v.downloadFile).toHaveBeenCalledWith("foo.py", "bar"); +}); +test('add new preprocessor from Events tab', () =>{ + let v = new VM.CPSettingsViewModel(mocks(), 
PROFILES, SCRIPTS, EVENTS); + v.gotoTab = jest.fn() + let s = v.addScript(); + v.events([{ + display: "testevent", + actions: ko.observableArray([ + {script: s, preprocessor: ko.observable(null)}, + ]) + }]); + let a = v.events()[0].actions()[0] + a.preprocessor(null); + v.actionPreprocessorChanged(a); + expect(v.preprocessors().length).toBe(0); + expect(a.preprocessor()).toBe(null); + expect(v.gotoTab).not.toHaveBeenCalled(); + + a.preprocessor('ADDNEW'); + v.actionPreprocessorChanged(a); + expect(v.preprocessors().length).toBe(1); + expect(a.preprocessor()).not.toBe(null); + expect(v.gotoTab).toHaveBeenCalled(); }); diff --git a/continuousprint/static/js/continuousprint_viewmodel.js b/continuousprint/static/js/continuousprint_viewmodel.js index 44232e0..abca6fa 100644 --- a/continuousprint/static/js/continuousprint_viewmodel.js +++ b/continuousprint/static/js/continuousprint_viewmodel.js @@ -65,6 +65,7 @@ function CPViewModel(parameters) { }); }; + // Patch the files panel to allow for adding to queue self.files.add = function(data) { // We first look for any queues with draft jobs - add the file here if so @@ -82,14 +83,16 @@ function CPViewModel(parameters) { let oldRemove = self.files.removeFile; let remove_cb = null; self.files.removeFile = function(data, evt) { - for (let j of self.defaultQueue.jobs()) { - for (let s of j.sets()) { - if (s.path() === data.path) { - remove_cb = () => oldRemove(data, evt); - return self.showRemoveConfirmModal() + try { + for (let j of self.defaultQueue.jobs()) { + for (let s of j.sets()) { + if (s.path() === data.path) { + remove_cb = () => oldRemove(data, evt); + return self.showRemoveConfirmModal() + } } } - } + } catch {} // Fail silently on error, and pass through return oldRemove(data, evt); }; self.rmDialog = $("#cpq_removeConfirmDialog"); @@ -108,6 +111,11 @@ function CPViewModel(parameters) { self._loadState(); // Refresh to get new "file missing" states self.hideRemoveConfirmModal(); }; + self.showSettingsHelp = 
function() { + console.log(self.settings); + self.settings.show('settings_plugin_continuousprint'); + $(`#settings_plugin_continuousprint a[href="#settings_continuousprint_help"]`).tab('show'); + }; // Patch the files panel to prevent selecting/printing .gjob files let oldEnableSelect = self.files.enableSelect; @@ -174,7 +182,7 @@ function CPViewModel(parameters) { s.expanded = expansions[s.id.toString()]; } } - let cpq = new CPQueue(q, self.api, self.files, self.profile); + let cpq = new CPQueue(q, self.api, self.files, self.profile, self.materials); // Replace draft entries that are still in draft let cpqj = cpq.jobs(); @@ -314,11 +322,9 @@ function CPViewModel(parameters) { break; case "setstate": data = JSON.parse(data["state"]); - console.log("got setstate", data); return self._setState(data); case "sethistory": data = JSON.parse(data["history"]); - console.log("got sethistory", data); return self._setHistory(data); default: theme = "info"; @@ -347,7 +353,7 @@ function CPViewModel(parameters) { continue; } let k = `${spool.material}_${spool.colorName}_#${spool.color.substring(1)}`; - result[k] = {value: k, text: `${spool.material} (${spool.colorName})`}; + result[k] = {value: k, text: `${spool.material} (${spool.colorName})`, density: spool.density || 1.24, diameter: spool.diameter || 1.75}; } self.materials(Object.values(result)); self.badMaterialCount(nbad); @@ -359,7 +365,7 @@ function CPViewModel(parameters) { self.humanize = function(num) { // Humanizes numbers by condensing and adding units if (num < 1000) { - return num.toString() + return (num % 1 === 0) ? num : num.toFixed(1); } else if (num < 100000) { let k = (num/1000); return ((k % 1 === 0) ? 
k : k.toFixed(1)) + 'k'; diff --git a/continuousprint/static/js/continuousprint_viewmodel.test.js b/continuousprint/static/js/continuousprint_viewmodel.test.js index 5c75add..3a78671 100644 --- a/continuousprint/static/js/continuousprint_viewmodel.test.js +++ b/continuousprint/static/js/continuousprint_viewmodel.test.js @@ -208,6 +208,16 @@ test('removeFile shows dialog', () => { expect(rmfile).not.toHaveBeenCalled(); }); +test('removeFile with exception fails gracefully', () => { + let m = mocks(); + let rmfile = m[2].removeFile; + let v = init(1, m); + v.defaultQueue = null; // Can't call .jobs() on null queue + let data = {path: 'asdf'}; + v.files.removeFile(data, null); // Raises exception + expect(rmfile).toHaveBeenCalledWith(data, null); // But still gets forwarded through +}); + test('removeConfirm calls removeFile', () => { let m = mocks(); let rmfile = m[2].removeFile; diff --git a/continuousprint/storage/database.py b/continuousprint/storage/database.py index 98ffe94..956c37d 100644 --- a/continuousprint/storage/database.py +++ b/continuousprint/storage/database.py @@ -9,12 +9,14 @@ FloatField, DateField, TimeField, + TextField, CompositeKey, JOIN, Check, ) from playhouse.migrate import SqliteMigrator, migrate +from ..data import CustomEvents, PREPROCESSORS from collections import defaultdict import datetime from enum import IntEnum, auto @@ -29,11 +31,46 @@ class DB: # Adding foreign_keys pragma is necessary for ON DELETE behavior queues = SqliteDatabase(None, pragmas={"foreign_keys": 1}) + automation = SqliteDatabase(None, pragmas={"foreign_keys": 1}) +CURRENT_SCHEMA_VERSION = "0.0.4" DEFAULT_QUEUE = "local" LAN_QUEUE = "LAN" ARCHIVE_QUEUE = "archive" +BED_CLEARING_SCRIPT = "Bed Clearing" +FINISHING_SCRIPT = "Finished" +COOLDOWN_SCRIPT = "Managed Cooldown" + + +class Script(Model): + name = CharField(unique=True) + created = DateTimeField(default=datetime.datetime.now) + body = TextField() + + class Meta: + database = DB.automation + + +class 
Preprocessor(Model): + name = CharField(unique=True) + created = DateTimeField(default=datetime.datetime.now) + body = TextField() + + class Meta: + database = DB.automation + + +class EventHook(Model): + name = CharField() + script = ForeignKeyField(Script, backref="events", on_delete="CASCADE") + preprocessor = ForeignKeyField( + Preprocessor, null=True, backref="events", on_delete="CASCADE" + ) + rank = FloatField() + + class Meta: + database = DB.automation class StorageDetails(Model): @@ -189,6 +226,7 @@ def as_dict(self): return dict( path=self.path, count=self.count, + metadata=self.metadata, materials=self.materials(), profiles=self.profiles(), id=self.id, @@ -205,6 +243,12 @@ class Set(Model, SetView): job = ForeignKeyField(Job, backref="sets", on_delete="CASCADE") rank = FloatField() count = IntegerField(default=1, constraints=[Check("count >= 0")]) + + # Contains JSON of metadata such as print time estimates, filament length + # etc. These are assigned on creation and are + # only as accurate as the provider's ability to analyze the gcode. + metadata = TextField(null=True) + remaining = IntegerField( # Unlike Job, Sets can have remaining > count if the user wants to print # additional sets as a one-off correction (e.g. 
a print fails) @@ -285,17 +329,83 @@ def file_exists(path: str) -> bool: MODELS = [Queue, Job, Set, Run, StorageDetails] +AUTOMATION = [Script, EventHook, Preprocessor] -def populate(): +def populate_queues(): DB.queues.create_tables(MODELS) - StorageDetails.create(schemaVersion="0.0.3") + StorageDetails.create(schemaVersion=CURRENT_SCHEMA_VERSION) Queue.create(name=LAN_QUEUE, addr="auto", strategy="LINEAR", rank=1) Queue.create(name=DEFAULT_QUEUE, strategy="LINEAR", rank=0) Queue.create(name=ARCHIVE_QUEUE, strategy="LINEAR", rank=-1) -def init(db_path="queues.sqlite3", logger=None): +def populate_automation(): + DB.automation.create_tables(AUTOMATION) + bc = Script.create(name=BED_CLEARING_SCRIPT, body="@pause") + fin = Script.create(name=FINISHING_SCRIPT, body="@pause") + EventHook.create(name=CustomEvents.PRINT_SUCCESS.event, script=bc, rank=0) + EventHook.create(name=CustomEvents.FINISH.event, script=fin, rank=0) + for pp in PREPROCESSORS.values(): + Preprocessor.create(name=pp["name"], body=pp["body"]) + + +def init_db(automation_db, queues_db, logger=None): + init_automation(automation_db, logger) + init_queues(queues_db, logger) + + +def init_automation(db_path, logger=None): + db = DB.automation + needs_init = not file_exists(db_path) + db.init(None) + db.init(db_path) + db.connect() + if needs_init: + if logger is not None: + logger.debug("Initializing automation DB") + populate_automation() + + +def migrateQueuesV2ToV3(details, logger): + # Constraint removal isn't allowed in sqlite, so we have + # to recreate the table and move the entries over. + # We also added a new `completed` field, so some calculation is needed. 
+ class TempSet(Set): + pass + + if logger is not None: + logger.warning( + f"Beginning migration to v0.0.3 for decoupled completions - {Set.select().count()} sets to migrate" + ) + db = DB.queues + with db.atomic(): + TempSet.create_table(safe=True) + for s in Set.select( + Set.path, + Set.sd, + Set.job, + Set.rank, + Set.count, + Set.remaining, + Set.material_keys, + Set.profile_keys, + ).execute(): + attrs = {} + for f in Set._meta.sorted_field_names: + attrs[f] = getattr(s, f) + attrs["completed"] = max(0, attrs["count"] - attrs["remaining"]) + TempSet.create(**attrs) + if logger is not None: + logger.warning(f"Migrating set {s.path} to schema v0.0.3") + + Set.drop_table(safe=True) + db.execute_sql('ALTER TABLE "tempset" RENAME TO "set";') + details.schemaVersion = "0.0.3" + details.save() + + +def init_queues(db_path, logger=None): db = DB.queues needs_init = not file_exists(db_path) db.init(None) @@ -304,8 +414,8 @@ def init(db_path="queues.sqlite3", logger=None): if needs_init: if logger is not None: - logger.debug("DB needs init") - populate() + logger.debug("Initializing queues DB") + populate_queues() else: try: details = StorageDetails.select().limit(1).execute()[0] @@ -322,43 +432,23 @@ def init(db_path="queues.sqlite3", logger=None): details.save() if details.schemaVersion == "0.0.2": - # Constraint removal isn't allowed in sqlite, so we have - # to recreate the table and move the entries over. - # We also added a new `completed` field, so some calculation is needed. 
- class TempSet(Set): - pass + migrateQueuesV2ToV3(details, logger) + if details.schemaVersion == "0.0.3": if logger is not None: logger.warning( - f"Beginning migration to v0.0.3 for decoupled completions - {Set.select().count()} sets to migrate" + f"Updating schema from {details.schemaVersion} to 0.0.4" ) - with db.atomic(): - TempSet.create_table(safe=True) - for s in Set.select( - Set.path, - Set.sd, - Set.job, - Set.rank, - Set.count, - Set.remaining, - Set.material_keys, - Set.profile_keys, - ).execute(): - attrs = {} - for f in Set._meta.sorted_field_names: - attrs[f] = getattr(s, f) - attrs["completed"] = max(0, attrs["count"] - attrs["remaining"]) - TempSet.create(**attrs) - if logger is not None: - logger.warning(f"Migrating set {s.path} to schema v0.0.3") - - Set.drop_table(safe=True) - db.execute_sql('ALTER TABLE "tempset" RENAME TO "set";') - details.schemaVersion = "0.0.3" + migrate( + migrator.add_column("set", "metadata", Set.metadata), + ) + details.schemaVersion = "0.0.4" details.save() - if details.schemaVersion != "0.0.3": - raise Exception("Unknown DB schema version: " + details.schemaVersion) + if details.schemaVersion != CURRENT_SCHEMA_VERSION: + raise Exception( + "DB schema version is not current: " + details.schemaVersion + ) if logger is not None: logger.debug("Storage schema version: " + details.schemaVersion) @@ -368,6 +458,23 @@ class TempSet(Set): return db +def migrateScriptsFromSettings(clearing_script, finished_script, cooldown_script): + # In v2.2.0 and earlier, a fixed list of scripts were stored in OctoPrint settings. + # This converts them to DB format for use in events. 
+ with DB.automation.atomic(): + for (evt, name, body) in [ + (CustomEvents.PRINT_SUCCESS, BED_CLEARING_SCRIPT, clearing_script), + (CustomEvents.FINISH, FINISHING_SCRIPT, finished_script), + (CustomEvents.COOLDOWN, COOLDOWN_SCRIPT, cooldown_script), + ]: + if body is None or body.strip() == "": + continue # Don't add empty scripts + Script.delete().where(Script.name == name).execute() + s = Script.create(name=name, body=body) + EventHook.delete().where(EventHook.name == evt.event).execute() + EventHook.create(name=evt.event, script=s, rank=0) + + def migrateFromSettings(data: list): # Prior to v2.0.0, all state for the plugin was stored in a json-serialized list # in OctoPrint settings. This method converts the various forms of the json blob diff --git a/continuousprint/storage/database_test.py b/continuousprint/storage/database_test.py index e8c9f33..1f750c6 100644 --- a/continuousprint/storage/database_test.py +++ b/continuousprint/storage/database_test.py @@ -3,30 +3,87 @@ import logging from .database import ( migrateFromSettings, - init as init_db, + migrateScriptsFromSettings, + init_db, + init_queues, + init_automation, Queue, + migrateQueuesV2ToV3, Job, Set, Run, + Script, + EventHook, StorageDetails, DEFAULT_QUEUE, ) +from ..data import CustomEvents import tempfile # logging.basicConfig(level=logging.DEBUG) -class DBTest(unittest.TestCase): +class QueuesDBTest(unittest.TestCase): def setUp(self): - self.tmp = tempfile.NamedTemporaryFile(delete=True) - self.db = init_db(self.tmp.name, logger=logging.getLogger()) + self.tmpQueues = tempfile.NamedTemporaryFile(delete=True) + self.addCleanup(self.tmpQueues.close) + init_queues( + self.tmpQueues.name, + logger=logging.getLogger(), + ) self.q = Queue.get(name=DEFAULT_QUEUE) - def tearDown(self): - self.tmp.close() +class AutomationDBTest(unittest.TestCase): + def setUp(self): + self.tmpAutomation = tempfile.NamedTemporaryFile(delete=True) + self.addCleanup(self.tmpAutomation.close) + init_automation( + 
self.tmpAutomation.name, + logger=logging.getLogger(), + ) + + +class DBTest(QueuesDBTest, AutomationDBTest): + def setUp(self): + AutomationDBTest.setUp(self) + QueuesDBTest.setUp(self) + + +class TestScriptMigration(AutomationDBTest): + def testMigration(self): + migrateScriptsFromSettings("test_clearing", "test_finished", "test_cooldown") + self.assertEqual( + EventHook.get(name=CustomEvents.PRINT_SUCCESS.event).script.body, + "test_clearing", + ) + self.assertEqual( + EventHook.get(name=CustomEvents.FINISH.event).script.body, "test_finished" + ) + self.assertEqual( + EventHook.get(name=CustomEvents.COOLDOWN.event).script.body, "test_cooldown" + ) + + def testMigrationEmpty(self): + migrateScriptsFromSettings("test_clearing", "test_finished", "") + self.assertEqual( + EventHook.select() + .where(EventHook.name == CustomEvents.COOLDOWN.event) + .count(), + 0, + ) -class TestMigration(DBTest): + def testMigrationNone(self): + migrateScriptsFromSettings("test_clearing", "test_finished", None) + self.assertEqual( + EventHook.select() + .where(EventHook.name == CustomEvents.COOLDOWN.event) + .count(), + 0, + ) + + +class TestMigration(QueuesDBTest): def testMigrationEmptyDict(self): migrateFromSettings({}) self.assertEqual(Job.select().count(), 0) @@ -109,7 +166,7 @@ def testMigrationSchemav2tov3(self): rank=1, ) - self.db = init_db(self.tmp.name, logger=logging.getLogger()) + migrateQueuesV2ToV3(details, logger=logging.getLogger()) # Destination set both exists and has computed `completed` field. 
# We don't actually check whether the constraints were properly applied, just assume that @@ -118,14 +175,11 @@ def testMigrationSchemav2tov3(self): self.assertEqual(s2.completed, s.count - s.remaining) -class TestEmptyJob(DBTest): +class TestEmptyJob(QueuesDBTest): def setUp(self): super().setUp() self.j = Job.create(queue=self.q, name="a", rank=0, count=5, remaining=5) - def tearDown(self): - self.tmp.close() - def testNextSetNoSets(self): self.assertEqual(self.j.next_set(dict(name="foo")), None) @@ -134,7 +188,7 @@ def testDecrementNoSet(self): self.assertEqual(self.j.remaining, 4) -class TestJobWithSet(DBTest): +class TestJobWithSet(QueuesDBTest): def setUp(self): super().setUp() self.j = Job.create( @@ -150,9 +204,6 @@ def setUp(self): profile_keys="foo,baz", ) - def tearDown(self): - self.tmp.close() - def testNextSetDraft(self): self.j.draft = True self.assertEqual(self.j.next_set(dict(name="baz")), None) @@ -234,7 +285,7 @@ def testFromDict(self): self.assertEqual([s.path for s in j2.sets], [s.path for s in j.sets]) -class TestMultiSet(DBTest): +class TestMultiSet(QueuesDBTest): def setUp(self): super().setUp() self.j = Job.create( @@ -266,7 +317,7 @@ def testSetsAreSequential(self): self.assertEqual(self.j.next_set(p), self.s[1]) -class TestSet(DBTest): +class TestSet(QueuesDBTest): def setUp(self): super().setUp() self.j = Job.create( diff --git a/continuousprint/storage/lan.py b/continuousprint/storage/lan.py index a9b3c05..272ec75 100644 --- a/continuousprint/storage/lan.py +++ b/continuousprint/storage/lan.py @@ -30,6 +30,11 @@ def __init__(self, manifest, lq): def get_base_dir(self): return self.queue.lq.get_gjob_dirpath(self.peer, self.hash) + def remap_set_paths(self): + # Replace all relative/local set paths with fully resolved paths + for s in self.sets: + s.path = s.resolve() + def updateSets(self, sets_list): self.sets = [LANSetView(s, self, i) for i, s in enumerate(sets_list)] @@ -61,6 +66,7 @@ def __init__(self, data, job, rank): setattr(self, 
attr, data[attr]) self.remaining = getint(data, "remaining", default=self.count) self.completed = getint(data, "completed") + self.metadata = data.get("metadata") self.material_keys = ",".join(data.get("materials", [])) self.profile_keys = ",".join(data.get("profiles", [])) self._resolved = None diff --git a/continuousprint/storage/lan_test.py b/continuousprint/storage/lan_test.py index 2489b7d..f85d0f4 100644 --- a/continuousprint/storage/lan_test.py +++ b/continuousprint/storage/lan_test.py @@ -25,6 +25,11 @@ def test_resolve_file(self): self.lq.get_gjob_dirpath.return_value = "/path/to/" self.assertEqual(self.s.resolve(), "/path/to/a.gcode") + def test_remap_set_paths(self): + self.lq.get_gjob_dirpath.return_value = "/path/to/" + self.j.remap_set_paths() + self.assertEqual(self.s.path, "/path/to/a.gcode") + def test_resolve_http_error(self): self.lq.get_gjob_dirpath.side_effect = HTTPError with self.assertRaises(ResolveError): diff --git a/continuousprint/storage/queries.py b/continuousprint/storage/queries.py index e803c2f..cdacd59 100644 --- a/continuousprint/storage/queries.py +++ b/continuousprint/storage/queries.py @@ -1,11 +1,24 @@ from peewee import IntegrityError, JOIN, fn from typing import Optional from datetime import datetime +import re import time import base64 from pathlib import Path -from .database import Queue, Job, Set, Run, DB, DEFAULT_QUEUE, ARCHIVE_QUEUE +from .database import ( + Queue, + Job, + Set, + Run, + DB, + DEFAULT_QUEUE, + ARCHIVE_QUEUE, + EventHook, + Preprocessor, + Script, +) +from ..data import CustomEvents MAX_COUNT = 999999 @@ -170,7 +183,6 @@ def _upsertSet(set_id, data, job): "remaining", ): v = min(int(v), MAX_COUNT) - setattr(s, k, v) s.job = job @@ -269,9 +281,6 @@ def _moveImpl(src, dest_id, retried=False): postRank = MAX_RANK # Pick the target value as the midpoint between the two ranks candidate = abs(postRank - destRank) / 2 + min(postRank, destRank) - # print( - # f"_moveImpl abs({postRank} - {destRank})/2 + 
min({postRank}, {destRank}) = {candidate}" - # ) # We may end up with an invalid candidate if we hit a singularity - in this case, rebalance all the # rows and try again @@ -330,6 +339,7 @@ def appendSet(queue: str, jid, data: dict, rank=_rankEnd): material_keys=",".join(data.get("materials", "")), profile_keys=",".join(data.get("profiles", "")), count=count, + metadata=data.get("metadata", None), remaining=getint(data, "remaining", count), completed=getint(data, "completed"), job=j, @@ -437,3 +447,99 @@ def getHistory(): def resetHistory(): Run.delete().execute() + + +def assignAutomation(scripts, preprocessors, events): + with DB.automation.atomic(): + EventHook.delete().execute() + Preprocessor.delete().execute() + Script.delete().execute() + s = dict() + for k, v in scripts.items(): + s[k] = Script.create(name=k, body=v) + p = dict() + for k, v in preprocessors.items(): + p[k] = Preprocessor.create(name=k, body=v) + + validEvents = set([e.event for e in CustomEvents]) + for k, e in events.items(): + if k not in validEvents: + raise KeyError(f"No such CPQ event {k}, options: {validEvents}") + for i, a in enumerate(e): + pre = None + if a.get("preprocessor") not in ("", None): + pre = p[a["preprocessor"]] + EventHook.create( + name=k, script=s[a["script"]], preprocessor=pre, rank=i + ) + + +def getAutomation(): + scripts = dict() + preprocessors = dict() + events = dict([(e.event, []) for e in CustomEvents]) + for s in Script.select(): + scripts[s.name] = s.body + for p in Preprocessor.select(): + preprocessors[p.name] = p.body + for e in ( + EventHook.select() + .join_from(EventHook, Script, JOIN.LEFT_OUTER) + .join_from(EventHook, Preprocessor, JOIN.LEFT_OUTER) + ): + events[e.name].append( + dict( + script=e.script.name, + preprocessor=e.preprocessor.name + if e.preprocessor is not None + else None, + ) + ) + + return dict(scripts=scripts, events=events, preprocessors=preprocessors) + + +def genEventScript(evt: CustomEvents, interp=None, logger=None) -> str: + 
result = [] + for e in ( + EventHook.select() + .join_from(EventHook, Script, JOIN.LEFT_OUTER) + .join_from(EventHook, Preprocessor, JOIN.LEFT_OUTER) + .where(EventHook.name == evt.event) + .order_by(EventHook.rank) + ): + procval = True + if e.preprocessor is not None and e.preprocessor.body.strip() != "": + procval = interp(e.preprocessor.body) + if logger: + logger.info( + f"EventHook preprocessor for script {e.script.name} ({e.preprocessor.name}): {e.preprocessor.body}\nSymbols: {interp.symtable}\nResult: {procval}" + ) + + if procval is None or procval is False: + continue + elif procval is True: + formatted = e.script.body + elif type(procval) is dict: + if logger: + logger.info( + f"Appending script {e.script.name} using formatting data {procval}" + ) + formatted = e.script.body.format(**procval) + else: + raise Exception( + f"Invalid return type {type(procval)} for peprocessor {e.preprocessor.name}" + ) + + leftovers = re.findall(r"\{.*?\}", formatted) + if len(leftovers) > 0: + ppname = ( + f"f from preprocessor {e.preprocessor.name}" + if e.preprocessor is not None + else "" + ) + raise Exception( + f"Unformatted placeholders in {e.script.name}{ppname}: {leftovers}" + ) + result.append(formatted) + return "\n".join(result) diff --git a/continuousprint/storage/queries_test.py b/continuousprint/storage/queries_test.py index 8f53b64..45f1800 100644 --- a/continuousprint/storage/queries_test.py +++ b/continuousprint/storage/queries_test.py @@ -1,6 +1,7 @@ import unittest from unittest.mock import ANY from pathlib import Path +from ..data import CustomEvents import logging import datetime import tempfile @@ -14,17 +15,19 @@ Set, Run, Queue, - init as init_db, DEFAULT_QUEUE, ARCHIVE_QUEUE, + EventHook, + Script, + Preprocessor, ) -from .database_test import DBTest +from .database_test import QueuesDBTest, AutomationDBTest from ..storage import queries as q PROFILE = dict(name="profile") -class TestEmptyQueue(DBTest): +class TestEmptyQueue(QueuesDBTest): def 
setUp(self): super().setUp() @@ -92,7 +95,7 @@ def testReplenishSilentOnFailedLookup(self): q.resetJobs([1, 2, 3]) -class TestSingleItemQueue(DBTest): +class TestSingleItemQueue(QueuesDBTest): def setUp(self): super().setUp() q.appendSet( @@ -262,7 +265,7 @@ def testUpdateSetCountAndRemaining(self): self.assertEqual(s2.remaining, 15) -class TestMultiItemQueue(DBTest): +class TestMultiItemQueue(QueuesDBTest): def setUp(self): super().setUp() @@ -363,3 +366,118 @@ def testAnnotateLastRun(self): r = Run.get(id=r.id) self.assertEqual(r.movie_path, "movie_path.mp4") self.assertEqual(r.thumb_path, "thumb_path.png") + + +class TestAutomation(AutomationDBTest): + def testAssignGet(self): + evt = CustomEvents.PRINT_SUCCESS.event + q.assignAutomation( + dict(foo="bar"), dict(), {evt: [dict(script="foo", preprocessor=None)]} + ) + got = q.getAutomation() + self.assertEqual(got["scripts"], dict(foo="bar")) + self.assertEqual(got["preprocessors"], dict()) + self.assertEqual(got["events"][evt], [dict(script="foo", preprocessor=None)]) + + def testAssignBadEventKey(self): + with self.assertRaisesRegexp(KeyError, "No such CPQ event"): + q.assignAutomation( + dict(), dict(), dict(evt=[dict(script="foo", preprocessor=None)]) + ) + + def testAssignMissingScript(self): + evt = CustomEvents.PRINT_SUCCESS.event + with self.assertRaises(KeyError): + q.assignAutomation( + dict(), dict(), {evt: [dict(script="foo", preprocessor=None)]} + ) + + def testMultiScriptEvent(self): + evt = CustomEvents.PRINT_SUCCESS.event + q.assignAutomation( + dict(s1="gcode1", s2="gcode2"), + dict(), + dict( + [ + ( + evt, + [ + dict(script="s1", preprocessor=""), + dict(script="s2", preprocessor=""), + ], + ) + ] + ), + ) + self.assertEqual(q.genEventScript(CustomEvents.PRINT_SUCCESS), "gcode1\ngcode2") + + # Ordering of event matters + q.assignAutomation( + dict(s1="gcode1", s2="gcode2"), + dict(), + dict( + [ + ( + evt, + [ + dict(script="s2", preprocessor=""), + dict(script="s1", preprocessor=""), + ], + ) 
+ ] + ), + ) + self.assertEqual(q.genEventScript(CustomEvents.PRINT_SUCCESS), "gcode2\ngcode1") + + def testPreprocessorEvalTrueFalseNone(self): + e = CustomEvents.PRINT_SUCCESS + q.assignAutomation( + dict(s1="gcode1"), + dict(p1="python1"), + dict([(e.event, [dict(script="s1", preprocessor="p1")])]), + ) + + self.assertEqual(q.genEventScript(e, lambda cond: True), "gcode1") + self.assertEqual(q.genEventScript(e, lambda cond: False), "") + self.assertEqual(q.genEventScript(e, lambda cond: None), "") + + def testNoProcessorPlaceholder(self): + e = CustomEvents.PRINT_SUCCESS + q.assignAutomation( + dict(s1="{foo} will never be formatted!"), + dict(), + dict([(e.event, [dict(script="s1", preprocessor=None)])]), + ) + with self.assertRaises(Exception): + q.genEventScript(e, lambda cond: False) + + def testProcessorEvalFormat(self): + e = CustomEvents.PRINT_SUCCESS + q.assignAutomation( + dict(s1="Hello {val}"), + dict(p1="mocked"), + dict([(e.event, [dict(script="s1", preprocessor="p1")])]), + ) + self.assertEqual( + q.genEventScript(e, lambda cond: dict(val="World")), "Hello World" + ) + + def testProcessorEvalBadType(self): + e = CustomEvents.PRINT_SUCCESS + q.assignAutomation( + dict(s1="don'tcare"), + dict(p1="mocked"), + dict([(e.event, [dict(script="s1", preprocessor="p1")])]), + ) + with self.assertRaises(Exception): + q.genEventScript(e, lambda cond: 7) + + def testProcessorEvalMissedPlaceholder(self): + e = CustomEvents.PRINT_SUCCESS + q.assignAutomation( + dict(s1="{foo} will never be formatted!"), + dict(p1="mocked"), + dict([(e.event, [dict(script="s1", preprocessor="p1")])]), + ) + with self.assertRaises(Exception): + q.genEventScript(e, lambda cond: dict(bar="baz")) diff --git a/continuousprint/templates/continuousprint_settings.jinja2 b/continuousprint/templates/continuousprint_settings.jinja2 index 909b046..d04401a 100644 --- a/continuousprint/templates/continuousprint_settings.jinja2 +++ b/continuousprint/templates/continuousprint_settings.jinja2 @@ 
-6,21 +6,24 @@ -