diff --git a/astrodata/core.py b/astrodata/core.py index 167d5424f..4f16c8b6d 100644 --- a/astrodata/core.py +++ b/astrodata/core.py @@ -444,6 +444,20 @@ def wcs(self): def wcs(self, value): self.nddata.wcs = value + @property + def record(self): + """Returns the record of reduction that produced this file, or None.""" + if self.is_single: + return self.nddata.record + else: + raise ValueError("Cannot return record for an AstroData object " + "that is not a single slice") + + @record.setter + @assign_only_single_slice + def record(self, value): + self.nddata.record = value + def __iter__(self): if self.is_single: yield self diff --git a/astrodata/fits.py b/astrodata/fits.py index e44175dc1..b0497516d 100644 --- a/astrodata/fits.py +++ b/astrodata/fits.py @@ -448,6 +448,7 @@ def associated_extensions(ver): 'uncertainty': None, 'mask': None, 'wcs': None, + 'record': None, 'other': [], } @@ -461,6 +462,8 @@ def associated_extensions(ver): parts['uncertainty'] = extra_unit elif name == 'WCS': parts['wcs'] = extra_unit + elif name == 'RECORD': + parts['record'] = extra_unit else: parts['other'].append(extra_unit) @@ -510,6 +513,11 @@ def associated_extensions(ver): # In case WCS info is in the PHU nd.wcs = fitswcs_to_gwcs(hdulist[0].header) + if parts['record'] is not None: + nd.record = asdftablehdu_to_record(parts['record']) + else: + nd.record = None + ad.append(nd, name=DEFAULT_EXTENSION) # This is used in the writer to keep track of the extensions that @@ -598,6 +606,9 @@ def ad_to_hdulist(ad): if isinstance(wcs, gWCS): hdul.append(wcs_to_asdftablehdu(ext.wcs, extver=ver)) + if hasattr(ext, "record") and ext.record is not None: + hdul.append(record_to_asdftablehdu(ext.record, extver=ver)) + for name, other in ext.meta.get('other', {}).items(): if isinstance(other, Table): hdu = table_to_bintablehdu(other, extname=name) @@ -851,3 +862,164 @@ def asdftablehdu_to_wcs(hdu): return return wcs + + +def data_to_asdftablehdu(name, table_name, col_name, data, extver=None): + """ + Serialize a data object as a FITS TableHDU (ASCII) extension. + + The ASCII table is actually a mini ASDF file. The constituent AstroPy + models must have associated ASDF "tags" that specify how to serialize them. + + In the event that serialization as pure ASCII fails (this should not + happen), a binary table representation will be used as a fallback. + + Parameters + ---------- + name : str + Name in the Asdf of the data to be saved. This is something like 'wcs' or 'record' and should be unique. + col_name : str + Name of the column to holding the data + table_name : str + Name of the bintable holding the column with the data + data : str + Blob of data to save, in some string encoded form + """ + + # Create a small ASDF file in memory containing the WCS object + # representation because there's no public API for generating only the + # relevant YAML subsection and an ASDF file handles the "tags" properly. 
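For readers not familiar with the ASDF-in-a-FITS-table trick reused here from the existing gWCS support, the dump step looks roughly like this in isolation (a sketch, not part of the patch; names and the payload are illustrative):

import asdf
from io import BytesIO

payload = {"record": '{"version": "3.x", "args": [], "interactive": []}'}
with BytesIO() as fd:
    # Write a complete, standalone ASDF document; 'inline' keeps any arrays as text
    asdf.AsdfFile(payload).write_to(fd, all_array_storage='inline')
    fd.seek(0)
    yaml_bytes = fd.read()
rows = yaml_bytes.decode('ascii').splitlines()   # one FITS ASCII-table row per YAML line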
+    try:
+        dat = {}
+        dat[name] = data
+        af = asdf.AsdfFile(dat)
+    except jsonschema.exceptions.ValidationError:
+        # (The original traceback also gets printed here)
+        raise TypeError("Cannot serialize model(s) for '{}' extension {}"
+                        .format(table_name, extver or ''))
+
+    # ASDF can only dump YAML to a binary file object, so do that and read
+    # the contents back from it for storage in a FITS extension:
+    with BytesIO() as fd:
+        with af:
+            # Generate the YAML, dumping any binary arrays as text:
+            af.write_to(fd, all_array_storage='inline')
+            fd.seek(0)
+            databuf = fd.read()
+
+    # Convert the bytes to readable lines of text for storage (falling back to
+    # saving as binary in the unexpected event that this is not possible):
+    try:
+        databuf = databuf.decode('ascii').splitlines()
+    except UnicodeDecodeError:
+        # This should not happen, but if the ASDF contains binary data in
+        # spite of the 'inline' option above, we have to dump the bytes to
+        # a non-human-readable binary table rather than an ASCII one:
+        LOGGER.warning("Could not convert {} ASDF to ASCII; saving table "
+                       "as binary".format(extver or ''))
+        hduclass = BinTableHDU
+        fmt = 'B'
+        databuf = np.frombuffer(databuf, dtype=np.uint8)
+    else:
+        hduclass = TableHDU
+        fmt = 'A{}'.format(max(len(line) for line in databuf))
+
+    # Construct the FITS table extension:
+    col = Column(name=col_name, format=fmt, array=databuf,
+                 ascii=hduclass is TableHDU)
+    return hduclass.from_columns([col], name=table_name, ver=extver)
+
+
+def asdftablehdu_to_data(hdu, name, col_name):
+    """
+    Recreate a previously stored data object from its serialization in a FITS table extension.
+
+    Returns None (issuing a warning) if the extension cannot be parsed, so
+    the rest of the file can still be read.
+
+    Parameters
+    ----------
+    hdu : :class:`~.BinTableHDU`
+        HDU to extract data from
+    name : str
+        Name the data is stored under in the ASDF tree
+    col_name : str
+        Name of the column holding the data
+    """
+
+    ver = hdu.header.get('EXTVER', -1)
+
+    if isinstance(hdu, (TableHDU, BinTableHDU)):
+        try:
+            colarr = hdu.data[col_name]
+        except KeyError:
+            LOGGER.warning("Ignoring extension {} with no '{}' table "
+                           "column".format(ver, col_name))
+            return
+
+        # If this table column contains text strings as expected, join the rows
+        # as separate lines of a string buffer and encode the resulting YAML as
+        # bytes that ASDF can parse. If AstroData has produced another format,
+        # it will be a binary dump due to the unexpected presence of non-ASCII
+        # data, in which case we just extract unmodified bytes from the table.
+        if colarr.dtype.kind in ('U', 'S'):
+            sep = os.linesep
+            # Just in case io.fits ever produces 'S' on Py 3 (not the default):
+            # join lines as str & avoid a TypeError with unicode linesep; could
+            # also use astype('U') but it assumes an encoding implicitly.
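The read side, sketched the same way (assuming `rows` holds the lines read back out of the 'record' table column, e.g. the output of the sketch above):

import os
import asdf
from io import BytesIO

databuf = os.linesep.join(rows).encode('ascii')   # rebuild the YAML byte stream
with BytesIO(databuf) as fd:
    with asdf.open(fd) as af:
        restored = af.tree['record']              # the original object comes back

In practice this is what lets the JSON blob attached by record_reduction_in_ad() survive an astrodata write()/open() round trip and come back as ext.record.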
+ if colarr.dtype.kind == 'S' and not isinstance(sep, bytes): + colarr = np.char.decode(np.char.rstrip(colarr), + encoding='ascii') + databuf = sep.join(colarr).encode('ascii') + else: + databuf = colarr.tobytes() + + # Convert the stored text to a Bytes file object that ASDF can open: + with BytesIO(databuf) as fd: + + # Try to extract a 'wcs' entry from the YAML: + try: + af = asdf.open(fd) + except Exception: + LOGGER.warning("Ignoring {} extension {}: failed to parse " + "ASDF.\nError was as follows:\n{}" + .format(name, ver, traceback.format_exc())) + return + else: + with af: + try: + record = af.tree[name] + except KeyError: + LOGGER.warning("Ignoring extension {}: missing " + "'{}' dict entry.".format(ver, name)) + return + + else: + LOGGER.warning("Ignoring non-FITS-table '{}' extension {}" + .format(name.upper(), ver)) + return + + return record + + +def record_to_asdftablehdu(record, extver=None): + """ + Serialize a reduce record object as a FITS TableHDU (ASCII) extension. + + The ASCII table is actually a mini ASDF file. The constituent AstroPy + models must have associated ASDF "tags" that specify how to serialize them. + + In the event that serialization as pure ASCII fails (this should not + happen), a binary table representation will be used as a fallback. + """ + return data_to_asdftablehdu('record', 'RECORD', 'record', record, extver) + + +def asdftablehdu_to_record(hdu): + """ + Recreate a Reduce Record object from its serialization in a FITS table extension. + + Returns None (issuing a warning) if the extension cannot be parsed, so + the rest of the file can still be read. + """ + return asdftablehdu_to_data(hdu, 'record', 'record') diff --git a/astrodata/tests/test_nddata.py b/astrodata/tests/test_nddata.py index 79b4981bc..87acaa650 100644 --- a/astrodata/tests/test_nddata.py +++ b/astrodata/tests/test_nddata.py @@ -128,6 +128,14 @@ def test_wcs_slicing(): assert nd[20, -10:].wcs(0) == (40, 20) +def test_record(): + nd = NDAstroData(np.zeros((4, 4))) + record_data = {"test": "foo"} + nd.record = record_data + # should read out the same + assert nd.record == record_data + + def test_access_to_other_planes(testnd): assert hasattr(testnd, 'OBJMASK') assert testnd.OBJMASK.shape == testnd.data.shape diff --git a/geminidr/interactive/fit/aperture.py b/geminidr/interactive/fit/aperture.py index ab19c600b..c237dfdbe 100644 --- a/geminidr/interactive/fit/aperture.py +++ b/geminidr/interactive/fit/aperture.py @@ -809,6 +809,11 @@ def __init__(self, model, filename_info='', ui_params=None): self.model = model self.fig = None self.help_text = DETAILED_HELP + self.inputs = dict() + + # moving this here so widgets are initialized in case + # we are reloading state from saved json via --record/--replay + self.params = self.parameters_view() # Customize the max_separation behavior away from the defaults. 
In particular, # we depend on extracting some information from the model which was not readily @@ -880,7 +885,7 @@ def fn(): self.make_modal(find_button, 'Recalculating Apertures...') self.make_modal(reset_button, 'Recalculating Apertures...') - return column( + retval = column( Div(text="Parameters to compute the profile:", css_classes=['param_section']), *widgets[0:5], @@ -891,6 +896,16 @@ def fn(): width_policy="min", ) + # save our input widgets for record/load if needed + for widget in (maxaper, minsky, use_snr, + threshold, percentile, sizing, sections): + self.inputs[widget.attr] = widget + + # Moving this to here so it happens before any load of saved state + self.model.recalc_apertures() + + return retval + def visualize(self, doc): """ Build the visualization in bokeh in the given browser document. @@ -904,7 +919,7 @@ def visualize(self, doc): bokeh_data_color = interactive_conf().bokeh_data_color - params = self.parameters_view() + params = self.params # self.parameters_view() ymax = 100 # we will update this when we have a profile aperture_view = ApertureView(self.model, self.model.profile_shape, ymax) @@ -944,12 +959,13 @@ def handle_clear(okc): else row(clear_button, renumber_button), ]) - self.model.recalc_apertures() + # moved to constructor, this would overwrite the results of a load() + # self.model.recalc_apertures() col = column(children=[aperture_view.fig, helptext], sizing_mode='scale_width') - for btn in (self.submit_button, self.abort_button): + for btn in (self.submit_button, self.abort_button, self.reset_all_button): btn.align = 'end' btn.height = 35 btn.height_policy = "fixed" @@ -958,7 +974,8 @@ def handle_clear(okc): btn.width_policy = "fixed" toolbar = row(Spacer(width=250), - column(self.get_filename_div(), row(self.abort_button, self.submit_button)), + column(self.get_filename_div(), + row(self.reset_all_button, self.abort_button, self.submit_button)), Spacer(width=10), align="end", css_classes=['top-row']) @@ -998,6 +1015,57 @@ def result(self): return [[], []] return np.array(locations), limits + def record(self): + """ + Record the state of the interactive UI. + + This enhances the record from the base class with additional state + information specific to the Fit1D Visualizer. This includes per-tab + fitting parameters and the current state of the data mask. + + Returns + ------- + dict : Dictionary representing the state of the inputs + """ + retval = super().record() + aperture_inputs = dict() + for k, v in self.inputs.items(): + aperture_inputs[k] = getattr(self.model, k) + retval["aperture_inputs"] = aperture_inputs + apertures = dict() + for aperture_id, aperture_model in self.model.aperture_models.items(): + aperture = dict() + aperture['location'] = aperture_model.source.data['location'][0] + aperture['start'] = aperture_model.source.data['start'][0] + aperture['end'] = aperture_model.source.data['end'][0] + apertures[aperture_id] = aperture + retval['apertures'] = apertures + return retval + + def load(self, record): + """ + Load the state of the interactive UI + + This reads in the saved state of a previous run and applies it + to the visualizer. 
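To make the shape of this record concrete, here is an invented example of what record() returns (and load() consumes) for the aperture visualizer; the top-level keys follow the code above, while the nested key names and all values are illustrative only:

saved_state = {
    "primitive_name": "findSourceApertures",
    "aperture_inputs": {"max_apertures": 4, "percentile": 80, "use_snr": True},
    "apertures": {
        1: {"location": 512.3, "start": 500.1, "end": 524.7},
        2: {"location": 901.8, "start": 890.0, "end": 913.5},
    },
}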
+ + Parameters + ---------- + record : dict + Dictionary with recorded state of the visualizer + """ + super().load(record) + for k, v in record["aperture_inputs"].items(): + if k != 'section': + setattr(self.model, k, v) + self.inputs[k].reset() + ap_ids = list() + ap_ids.extend(self.model.aperture_models.keys()) + for aperture_id in ap_ids: + self.model.delete_aperture(aperture_id) + for aperture_id, aperture in record["apertures"].items(): + self.model.add_aperture(aperture["location"], aperture["start"], aperture["end"]) + def interactive_find_source_apertures(ext, ui_params=None, **kwargs): """ @@ -1009,7 +1077,6 @@ def interactive_find_source_apertures(ext, ui_params=None, **kwargs): also interact directly with the found aperutres as desired. When the user hits the `Submit` button, this method will return the results of the find to the caller. - """ model = FindSourceAperturesModel(ext, **kwargs) fsav = FindSourceAperturesVisualizer(model, ui_params=ui_params, filename_info=ext.filename) diff --git a/geminidr/interactive/fit/fit1d.py b/geminidr/interactive/fit/fit1d.py index e3b178c92..1834ff464 100644 --- a/geminidr/interactive/fit/fit1d.py +++ b/geminidr/interactive/fit/fit1d.py @@ -16,8 +16,11 @@ from geminidr.interactive.interactive_config import interactive_conf from gempy.library.astrotools import cartesian_regions_to_slices from gempy.library.fitting import fit_1D +from gempy.utils import logutils +_log = logutils.get_logger(__name__) + # Names to use for masks. You can change these to change the label that gets displayed in the legend SIGMA_MASK_NAME = 'rejected (sigma)' USER_MASK_NAME = 'rejected (user)' @@ -901,15 +904,18 @@ def min_max_pad(data, default_min, default_max): return mn, mx, 0.1 * (mx - mn) # if xdata or ydata are empty, we set some arbitrary values so the UI is ok x_min, x_max, x_pad = min_max_pad(xdata, 0, 4000) - if x_min != x_max: - self.p_main.x_range.update(start=x_min - x_pad, end=x_max + x_pad * 2) - y_min, y_max, y_pad = min_max_pad(ydata, 0, 100) - if y_min != y_max: - self.p_main.y_range.update(start=y_min - y_pad, end=y_max + y_pad) + if self.p_main: + if x_min != x_max: + self.p_main.x_range.update(start=x_min - x_pad, end=x_max + x_pad * 2) + y_min, y_max, y_pad = min_max_pad(ydata, 0, 100) + if y_min != y_max: + self.p_main.y_range.update(start=y_min - y_pad, end=y_max + y_pad) if x_range is not None: - self.p_main.x_range = x_range + if self.p_main: + self.p_main.x_range = x_range if y_range is not None: - self.p_main.y_range = y_range + if self.p_main: + self.p_main.y_range = y_range def reset_dialog_handler(self, result): """ @@ -1040,6 +1046,71 @@ def _point_mask_handler(self, x, y, mult, action): self.model.perform_fit() + def record(self): + """ + Record the state of this tab into a dictionary. + + This call returns a dictionary representation of the state of this tab interface. + This dictionary can be sent back in to the :meth:`load` method to restore the state + at a later time. + + Returns + ------- + dict : Dictionary describing the state of the user interface + """ + def encode_mask(mask): + retval = "" + for mask_item in mask: + if mask_item == USER_MASK_NAME: + retval = retval + "1" + else: + retval = retval + "0" + return retval + return { + "mask": encode_mask(self.model.data.data['mask']), + "params": self.model.fit.extract_params() + } + + def load(self, record): + """ + Load the state of this tab from a dictionary. + + This call loads the state of the interface tab from a previously saved dictionary + from :meth:`record`. 
+ + Parameters + ---------- + record : dict + Dictionary of saved state from :meth:`record` + """ + def decode_mask(mask): + if isinstance(mask, list): + return mask + else: + retval = list() + for char in mask: + if char == '1': + retval.append(USER_MASK_NAME) + else: + retval.append('good') + return retval + self.model.data.data['mask'] = decode_mask(record["mask"]) + if "regions" in record["params"]: + region_tuples = cartesian_regions_to_slices(record["params"]["regions"]) + self.model.band_model.load_from_tuples(region_tuples) + if "function" in record["params"]: + self.fitting_parameters_ui.function.select(record["params"]["function"]) + self.fitting_parameters_ui.order_slider.children[1].value = record["params"]["order"] + self.fitting_parameters_ui.sigma_lower_slider.children[1].value = record["params"]["sigma_lower"] + self.fitting_parameters_ui.sigma_upper_slider.children[1].value = record["params"]["sigma_upper"] + niter = record["params"]["niter"] + if niter == 0: + self.fitting_parameters_ui.sigma_button.active = [] + else: + self.fitting_parameters_ui.sigma_button.active = [0] + self.fitting_parameters_ui.niter_slider.children[1].value = record["params"]["niter"] + self.model.perform_fit() + # TODO refactored this down from tracing, but it breaks # x/y tracking when the mouse moves in the figure for calculateSensitivity @staticmethod @@ -1284,6 +1355,50 @@ def kickoff_modal(attr, old, new): self.fits.append(tui.model) self.panels.append(tui) + self._reinit_params = {k: v for k, v in ui_params.values.items()} + self._record_params = {k: v for k, v in ui_params.values.items()} + + # noinspection PyProtectedMember + def reset_reinit_panel(self, param=None): + """ + Reset all the parameters in the Tracing Panel (leftmost column). + If a param is provided, it resets only this parameter in particular. + + Parameters + ---------- + param : str + Parameter name + """ + for fname in self.ui_params.reinit_params: + if param is None or fname == param: + reset_value = self._reinit_params[fname] + else: + continue + + # Handle CheckboxGroup widgets + if hasattr(self.widgets[fname], "value"): + attr = "value" + else: + attr = "active" + reset_value = [0] if reset_value else [] + old = getattr(self.widgets[fname], attr) + + # Update widget value + if reset_value is None: + kwargs = {attr: self.widgets[fname].start, "show_value": False} + else: + kwargs = {attr: reset_value} + self.widgets[fname].update(**kwargs) + + # Update Text Field via callback function + if 'value' in self.widgets[fname]._callbacks: + for callback in self.widgets[fname]._callbacks['value']: + callback('value', old=old, new=reset_value) + if 'value_throttled' in self.widgets[fname]._callbacks: + for callback in self.widgets[fname]._callbacks['value_throttled']: + callback(attrib='value_throttled', old=old, new=reset_value) + + def visualize(self, doc): """ Start the bokeh document using this visualizer. 
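The mask round trip used by the per-tab record()/load() above is easy to sanity-check in isolation; a minimal, self-contained sketch of the same encoding contract:

USER_MASK_NAME = 'rejected (user)'

def encode_mask(mask):
    # user-rejected points become '1', everything else '0'
    return "".join("1" if m == USER_MASK_NAME else "0" for m in mask)

def decode_mask(encoded):
    # anything that was not user-rejected comes back as 'good'
    return [USER_MASK_NAME if c == "1" else 'good' for c in encoded]

assert encode_mask(['good', USER_MASK_NAME, 'rejected (sigma)']) == "010"
assert decode_mask("010") == ['good', USER_MASK_NAME, 'good']

Sigma-rejected points are deliberately stored as '0'; the perform_fit() call at the end of load() re-applies the sigma rejection.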
@@ -1301,7 +1416,7 @@ def visualize(self, doc): col.sizing_mode = 'scale_width' col.width_policy = 'max' - for btn in (self.submit_button, self.abort_button): + for btn in (self.submit_button, self.abort_button, self.reset_all_button): btn.align = 'end' btn.height = 35 btn.height_policy = "fixed" @@ -1313,11 +1428,12 @@ def visualize(self, doc): if self.filename_info: self.submit_button.align = 'end' layout_ls.append(row(Spacer(width=250), - column(self.get_filename_div(), row(self.abort_button, self.submit_button)), + column(self.get_filename_div(), + row(self.reset_all_button, self.abort_button, self.submit_button)), Spacer(width=10), align="end", css_classes=['top-row'])) else: - layout_ls.append(row(self.abort_button, self.submit_button), + layout_ls.append(row(self.reset_all_button, self.abort_button, self.submit_button), align="end", css_classes=['top-row']) if self.reinit_panel is None: @@ -1401,6 +1517,66 @@ def results(self): """ return [fit.fit for fit in self.fits] + def record(self): + """ + Record the state of the interactive UI. + + This enhances the record from the base class with additional state + information specific to the Fit1D Visualizer. This includes per-tab + fitting parameters and the current state of the data mask. + + Returns + ------- + dict : Dictionary representing the state of the inputs + """ + retval = super().record() + retval["tabs"] = list() + reinit_params = dict() + if self.ui_params.reinit_params is not None: + for fname in self.ui_params.reinit_params: + reinit_params[fname] = self.ui_params.values[fname] + retval["reinit_params"] = reinit_params # self._reinit_params.copy() + for tab in self.panels: + retval["tabs"].append(tab.record()) + return retval + + def load(self, record, reconstruct_points=True): + """ + Load the visualizer state from a saved record. + + This method will load this interactive visualizer from values saved + by an earlier call to :meth:`record`. + + Parameters + ---------- + record : dict + Dictionary capturing the state of this visualizer, as returned by :meth:`record` + reconstruct_points : bool + If True, call the reconstruct_points after changing the reinit_params. Defaults to True + """ + super().load(record) + + # This won't work any more, we only capture the reinit parameters on entry, not on success + # should that change? 
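Putting those pieces together, a complete Fit1DVisualizer record combines the base record, the reinit parameters and one entry per tab. This example is invented; parameter names and values are illustrative, but the keys inside "params" are the ones load() reads back:

fit1d_state = {
    "primitive_name": "traceApertures",
    "reinit_params": {"trace_order": 2, "step": 10},
    "tabs": [
        {"mask": "00100",
         "params": {"function": "chebyshev", "order": 3,
                    "sigma_lower": 3.0, "sigma_upper": 3.0,
                    "niter": 2, "regions": "1:250"}},
    ],
}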
+ saw_change = False + if self.ui_params.reinit_params is not None: + for fname in self.ui_params.reinit_params: + oldval = self._reinit_params[fname] + newval = record["reinit_params"][fname] + if newval != oldval: + saw_change = True + self._reinit_params[fname] = newval + if saw_change: + # now apply the reinit params + self.reset_reinit_panel() + # take points from saved values, incorporates user edits + if reconstruct_points: + self.reconstruct_points() + + # now restore the tabs + for tab, tab_record in zip(self.panels, record["tabs"]): + tab.load(tab_record) + def prep_fit1d_params_for_fit1d(fit1d_params): """ diff --git a/geminidr/interactive/fit/wavecal.py b/geminidr/interactive/fit/wavecal.py index 0c7db6c4f..c5fe73fd5 100644 --- a/geminidr/interactive/fit/wavecal.py +++ b/geminidr/interactive/fit/wavecal.py @@ -444,6 +444,27 @@ def handle_line_wavelength(self, attrib, old, new): if new is not None and wavestr(new) not in self.new_line_dropdown.options: self.add_new_line() + def record(self): + def listify(l): + if isinstance(l, list): + return l + retval = list() + retval.extend(l) + return retval + retval = super().record() + wavecal_data = dict() + for k, v in self.model.data.data.items(): + wavecal_data[k] = listify(v) + retval['wavecal_data'] = wavecal_data + return retval + + def load(self, record): + self.model.data.data = record['wavecal_data'] + # use base class load, but do not reconstruct points - we did that already by loading wavecal_data + super().load(record, reconstruct_points=False) + # the superclass will redo the fit for us + + class WavelengthSolutionVisualizer(Fit1DVisualizer): """ A Visualizer specific to determineWavelengthSolution @@ -471,6 +492,23 @@ def image(self): image.append(model.y[goodpix]) return image + def record(self): + """ + Record the state of the interactive UI. + + This enhances the record from the base class with additional state + information specific to the Fit1D Visualizer. This includes per-tab + fitting parameters and the current state of the data mask. + + Returns + ------- + dict : Dictionary representing the state of the inputs + """ + return super().record() + + def load(self, record): + super().load(record) + def get_closest(arr, value): """ diff --git a/geminidr/interactive/interactive.py b/geminidr/interactive/interactive.py index d4a642945..013167684 100644 --- a/geminidr/interactive/interactive.py +++ b/geminidr/interactive/interactive.py @@ -24,6 +24,7 @@ "GIRegionListener", "GIRegionModel", "RegionEditor", "TabsTurboInjector", "UIParameters", "do_later"] +from recipe_system.utils.reduce_recorder import record_interactive, in_replay _visualizer = None @@ -92,6 +93,10 @@ def __init__(self, title='', primitive_name='', # set help to default, subclasses should override this with something specific to them self.help_text = help_text if help_text else DEFAULT_HELP + # JSON encoded state for the interface. This will either be populated by + # load() if we are doing a replay, or by post_show() once the UI is shown. 
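The listify() helper in the wavecal record() above is presumably there because bokeh ColumnDataSource columns often come back as numpy arrays, which the json module will not serialize directly; a quick illustration (not from the patch):

import json
import numpy as np

column = np.array([5021.4, 5042.7])
json.dumps({"wavecal_data": {"peaks": list(column)}})    # fine: the items behave as floats
# json.dumps({"wavecal_data": {"peaks": column}})        # TypeError: ndarray is not JSON serializable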
+ self.reset_all_state = None + self.exited = False self.title = title self.filename_info = filename_info if filename_info else '' @@ -117,6 +122,13 @@ def __init__(self, title='', primitive_name='', label="Abort", name="abort_btn", ) + self.reset_all_button = Button(align='center', + button_type='warning', + css_classes=["submit_btn"], + id="_reset_all_btn", + label="Reset All", + name="reset_all_btn", + ) # The submit_button_handler is only needed to flip the user_accepted flag to True before # the bokeh event loop terminates # self.submit_button.on_click(self.submit_button_handler) @@ -146,6 +158,8 @@ def __init__(self, title='', primitive_name='', self.abort_button.on_click(self.abort_button_handler) self.abort_button.js_on_change('disabled', abort_callback) + self.reset_all_button.on_click(self.reset_all_button_handler) + self.doc = None self._message_holder = None # callback for the new (buttonless) ok/cancel dialog. @@ -273,26 +287,38 @@ def submit_button_handler(self): 2) The fit is bad, we pop up a message dialog for the user and they hit 'OK' to return to the UI 3) The fit is poor, we pop up an ok/cancel dialog for the user and continue or return to the UI as directed. """ - bad_fits = ", ".join(tab.title for fit, tab in zip(self.fits, self.tabs.tabs) - if fit.quality == FitQuality.BAD) - poor_fits = ", ".join(tab.title for fit, tab in zip(self.fits, self.tabs.tabs) - if fit.quality == FitQuality.POOR) - if bad_fits: - # popup message - self.show_user_message(f"Failed fit(s) on {bad_fits}. Please " - "modify the parameters and try again.") - elif poor_fits: - def cb(accepted): - if accepted: - # Trigger the exit/fit, otherwise we do nothing + # actual submit logic lives in this callback. If we need to show the dialog to the user because + # we are in a replay, we'll use this callback after their answer. If not, we call it directly. + def do_submit(accepted=True): + if accepted: + bad_fits = ", ".join(tab.title for fit, tab in zip(self.fits, self.tabs.tabs) + if fit.quality == FitQuality.BAD) + poor_fits = ", ".join(tab.title for fit, tab in zip(self.fits, self.tabs.tabs) + if fit.quality == FitQuality.POOR) + if bad_fits: + # popup message + self.show_user_message(f"Failed fit(s) on {bad_fits}. Please " + "modify the parameters and try again.") + elif poor_fits: + def cb(accepted): + if accepted: + # Trigger the exit/fit, otherwise we do nothing + self.submit_button.disabled = True + self.show_ok_cancel(f"Poor quality fit(s)s on {poor_fits}. Click " + "OK to proceed anyway, or Cancel to return to " + "the fitter.", cb) + else: + # Fit is good, we can exit + # Trigger the submit callback via disabling the submit button self.submit_button.disabled = True - self.show_ok_cancel(f"Poor quality fit(s)s on {poor_fits}. Click " - "OK to proceed anyway, or Cancel to return to " - "the fitter.", cb) + if in_replay() and self.record() != self.reset_all_state: + self.show_ok_cancel("You have made changes. Submitting this will stop the current replay. You " + "will be able to continue the reduction as normal. Click OK to proceed " + "anyway, or Cancel to return to the fitter. 
You can use the Reset All button " + "to restore the inputs to the values from the replay.", do_submit) else: - # Fit is good, we can exit - # Trigger the submit callback via disabling the submit button - self.submit_button.disabled = True + # No need to gatekeep, we aren't running in replay mode + do_submit() def abort_button_handler(self): """ @@ -307,6 +333,17 @@ def cb(accepted): self.show_ok_cancel(f"Are you sure you want to abort? DRAGONS reduce will exit completely.", cb) + def reset_all_button_handler(self): + """ + Used by the reset all button to restore the initial state of this interactive + tool. + + This button handler resets the UI to the initial state. This saved state is either the state + loaded in for a replay from a saved run, or the state captured from record() during post_show() + once the UI was built. + """ + self.load(self.reset_all_state) + def session_ended(self, sess_context, user_satisfied): """ Handle the end of the session by stopping the bokeh server, which @@ -548,11 +585,15 @@ def show_user_message(self, message): # and display those via an alert. It's a workaround # so that here we can send messages to the user from # the bokeh server-side python. - if self._message_holder.text == message: - # need to trigger a change... - self._message_holder.text = f"{message} " + if hasattr(self._message_holder, "text"): + if self._message_holder.text == message: + # need to trigger a change... + self._message_holder.text = f"{message} " + else: + self._message_holder.text = message else: - self._message_holder.text = message + # If we do not yet have a built UI... + _log.info(message) def make_widgets_from_parameters(self, params, reinit_live: bool = True, slider_width: int = 256, add_spacer=False, @@ -667,6 +708,40 @@ def handler(val): return handler + def post_show(self): + """ + Actions to take after showing the visualizer. + + This is broken out separately to capture any actions to perform once the UI is + displayed. Putting this here makes it easier for subclasses to customize the + show() method and still benefit from this final bookkeeping. + """ + if not self.reset_all_state: + self.reset_all_state = self.record() + + def record(self): + """ + Record the state of the interactive interface. + + For now, this record is for information purposes. It may be enhanced in future + to allow the intractive interface to be repopulated from a recorded state. + + Subclasses should call down to this and add their own information to the record + """ + record = dict() + record["primitive_name"] = self.primitive_name + + return record + + def load(self, record): + self.reset_all_state = record + if record["primitive_name"] != self.primitive_name: + _log.warning("While loading interactive data, recorded primitive {} did not match expected primitive {}" + .format(record["primitive_name"], self.primitive_name)) + + def reset_all(self): + self.load(self.reset_all_state) + def select_handler_factory(self, key, reinit_live=False): """ Returns a function that updates the `extras` attribute. 
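The base-class record()/load() pair above is intentionally minimal; subclasses extend it by calling super() and adding their own state, as the Fit1D and aperture visualizers do. A sketch of the pattern (the subclass and widget names here are hypothetical):

from geminidr.interactive.interactive import PrimitiveVisualizer

class MyVisualizer(PrimitiveVisualizer):
    def record(self):
        state = super().record()            # {"primitive_name": ...}
        state["smoothing"] = self.smoothing_slider.value
        return state

    def load(self, record):
        super().load(record)                # also stores the record as reset_all_state
        self.smoothing_slider.value = record["smoothing"]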
@@ -1409,7 +1484,8 @@ def fn(): # We have to defer this as the delete may come via the keypress URL # But we aren't in the PrimitiveVisualizaer so we reference the # document and queue it directly - self.fig.document.add_next_tick_callback(lambda: fn()) + if self.fig.document: + self.fig.document.add_next_tick_callback(lambda: fn()) def finish_regions(self): pass @@ -1583,7 +1659,7 @@ def add_tab(self, child: Instance(bm.layouts.LayoutDOM), title: str): :param title: str Title for the new tab """ - tab_dummy = row(Div(),) + tab_dummy = row(Div(text=""),) tab_child = child self.tab_children.append(child) @@ -1612,7 +1688,13 @@ def clear_old_tab(): # clear the old tab via an event on the UI loop # we don't want to do it right now - wait until the tab change has happened do_later(clear_old_tab) + self.tabs.tabs[new].child.children[0] = self.tab_children[new] + # Have to clear the old tab contents with a future callback or bokeh Tabs interface freaks out + + def fn(): + self.tabs.tabs[old].child.children[0] = self.tab_dummy_children[old] + do_later(fn) class UIParameters: diff --git a/geminidr/interactive/server.py b/geminidr/interactive/server.py index 737fd92a2..133210b80 100644 --- a/geminidr/interactive/server.py +++ b/geminidr/interactive/server.py @@ -19,6 +19,8 @@ # Set to True to tell the interactive code to automatically submit in # order to test the interactive paths automatically +from recipe_system.utils.reduce_recorder import load_replay_interactive_settings, record_interactive + test_mode = False from bokeh.themes import built_in_themes @@ -154,6 +156,7 @@ def _bkapp(doc): doc.template_variables['filename_info'] = _visualizer.filename_info _visualizer.show(doc) + _visualizer.post_show() doc.title = title @@ -318,7 +321,13 @@ def interactive_fitter(visualizer): The visualizer UI to display """ set_visualizer(visualizer) + load_replay_interactive_settings(visualizer) start_server() set_visualizer(None) + if not visualizer.user_satisfied: raise KeyboardInterrupt() + + # return record of the state of the visualizer + record = visualizer.record() + record_interactive(record) diff --git a/recipe_system/doc/rs_UsersManual/reduce.rst b/recipe_system/doc/rs_UsersManual/reduce.rst index b43c241dc..2952fd1df 100644 --- a/recipe_system/doc/rs_UsersManual/reduce.rst +++ b/recipe_system/doc/rs_UsersManual/reduce.rst @@ -131,6 +131,19 @@ always using the "best-match" returned by the local calibration manager. reduce S20161025S0111.fits --user_cal processed_bias:S20161025S0200_bias.fits +Recording Parameters For Reduction +---------------------------------- +When running reduce, you can ues the ``--record`` option to instruct it to record +the parameters used in the reduction. This allows you or someone to repeat an +identical reduction later or to see what parameters were used. To rerun the +reduce, just run with the ``--replay`` option and specify the file you recorded to. + +:: + + reduce --record saved_reduction.json S20161025S0111.fits + reduce --replay saved_reduction.json + + Command Line Options and Switches ================================= The ``reduce`` command help is provided by the ``--help`` option. This help is @@ -270,6 +283,13 @@ Configuration Switches and Options -r display +**--record ** + Save the arguments used for reduce, plus any interactive tweaks made, to the + given json file for later replay. + +**--replay ** + Replay a reduction using the arguments and interactive settings from the given + json file. 
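Note that load_reduce_record() (added later in this patch) also accepts a FITS file produced by a recorded run, since record_reduction_in_ad() embeds the record in every output extension; the filename below is only an example:

::

    reduce --replay S20161025S0111_stack.fits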
**--suffix ** Add "suffix" to output filenames at the end of the reduction. diff --git a/recipe_system/reduction/coreReduce.py b/recipe_system/reduction/coreReduce.py index d1400ae0a..ecc725e25 100644 --- a/recipe_system/reduction/coreReduce.py +++ b/recipe_system/reduction/coreReduce.py @@ -12,6 +12,7 @@ class Reduce provides one (1) public method: # coreReduce.py # ------------------------------------------------------------------------------ import os +import re import sys import inspect @@ -32,8 +33,10 @@ class Reduce provides one (1) public method: from recipe_system.utils.errors import ModeError from recipe_system.utils.errors import RecipeNotFound from recipe_system.utils.errors import PrimitivesNotFound +from recipe_system.utils.reduce_recorder import init_reduce_recorder, record_reduction, \ + load_reduce_record, record_reduction_in_ad -from recipe_system.utils.reduce_utils import buildParser +from recipe_system.utils.reduce_utils import buildParser, normalize_args, normalize_upload from recipe_system.utils.reduce_utils import normalize_ucals from recipe_system.utils.reduce_utils import set_btypes from recipe_system.utils.rs_utilities import log_traceback @@ -118,6 +121,15 @@ def __init__(self, sys_args=None): args = buildParser(__version__).parse_args() else: args = buildParser(__version__).parse_args([]) + record = args.record + + self.add_replay_suffix = False + if len(sys.argv) == 3 and sys.argv[1] == '--replay': + recorded_args = load_reduce_record(sys.argv[2]) + args = buildParser(__version__).parse_args(recorded_args) + args = normalize_args(args) + args.upload = normalize_upload(args.upload) + self.add_replay_suffix = True # acquire any new astrodata classes. if args.adpkg: @@ -133,6 +145,11 @@ def __init__(self, sys_args=None): self._upload = args.upload self._output_filenames = None self.recipename = args.recipename if args.recipename else '_default' + if record is not None: + record_file = None + if record: + record_file = record[0] + init_reduce_recorder(record_file) @property def upload(self): @@ -252,10 +269,20 @@ def runr(self): self._output_filenames = [ad.filename for ad in p.streams['main']] raise + for ad in p.streams['main']: + record_reduction_in_ad(ad) + if self.add_replay_suffix: + for ad in p.streams['main']: + m = re.search(r'(.*)\.fits', ad.filename) + if m: + ad.update_filename(suffix="_replayed", strip=False) + self._write_final(p.streams['main']) self._output_filenames = [ad.filename for ad in p.streams['main']] log.stdinfo("\nreduce completed successfully.") + record_reduction() + # -------------------------------- prive ----------------------------------- def _check_files(self, ffiles): """ diff --git a/recipe_system/utils/reduce_recorder.py b/recipe_system/utils/reduce_recorder.py new file mode 100644 index 000000000..52437aa10 --- /dev/null +++ b/recipe_system/utils/reduce_recorder.py @@ -0,0 +1,191 @@ +import json +import sys + +import astrodata + +from gempy.utils import logutils +from recipe_system import __version__ + + +__all__ = ["in_replay", "init_reduce_recorder", "record_interactive", "record_reduction", "load_reduce_record", + "load_replay_interactive_settings", "record_reduction_in_ad", "load_reduce_record_from_ad"] + +reduce_recorder = None +replay_record = None +replay_step = 0 +reduce_filename = None +disable_replay = False +warned_user = False + +log = logutils.get_logger(__name__) + + +def in_replay(): + """ + Check if we are in an active replay. 
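For orientation, this is roughly what the recorder defined in this module writes to disk (and embeds in the output files); the contents are invented for illustration:

saved_reduction = {
    "version": "3.1.0",                                  # DRAGONS __version__ at record time
    "args": ["--record", "saved_reduction.json", "S20161025S0111.fits"],
    "interactive": [                                     # one entry per interactive tool, in order
        {"primitive_name": "findSourceApertures",
         "aperture_inputs": {"percentile": 80},
         "apertures": {"1": {"location": 512.3, "start": 500.1, "end": 524.7}}},
    ],
}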
+
+    This is a utility call to check if we are currently doing a
+    replay of a recorded session.
+
+    Returns
+    -------
+    bool : True if we are in a replay, False if not (including if the user
+        aborted the replay by modifying some inputs earlier)
+    """
+    if replay_record and not disable_replay:
+        return True
+    return False
+
+
+def init_reduce_recorder(filename):
+    """
+    Set up the reduce job to record interactive parameters to the named file.
+
+    This call sets up the reduce job to save each interactive session to a
+    json file for future reuse.
+
+    Parameters
+    ----------
+    filename : str
+        Name of the file to save the state in
+    """
+    global reduce_recorder
+    global reduce_filename
+    reduce_recorder = {
+        "version": __version__,
+        "args": sys.argv[1:],
+        "interactive": [],
+    }
+    reduce_filename = filename
+
+
+def record_interactive(record):
+    """
+    Add a json record to the record to be saved for this reduce job.
+
+    This call takes a single dictionary representing the current interactive
+    tool's state and adds it to the set to be saved for this session overall.
+
+    Parameters
+    ----------
+    record : dict
+        Dictionary describing the state of the current interactive tool
+    """
+    global reduce_recorder
+    if reduce_recorder is not None:
+        reduce_recorder["interactive"].append(record)
+    if replay_record is not None and not disable_replay:
+        # We're doing a replay, check if this interactive step was modified
+        validate_replay_step(record)
+
+
+def record_reduction():
+    """
+    Save a record of this reduction session to the json file.
+
+    This call writes all of the information needed for this reduce session,
+    including the interactive tools, to a json file.
+    """
+    if reduce_recorder is not None and reduce_filename is not None:
+        with open(reduce_filename, 'w') as reduce_file:
+            output = json.dumps(reduce_recorder, indent=4)
+            reduce_file.write(f"{output}")
+
+
+def record_reduction_in_ad(ad):
+    if reduce_recorder is not None:
+        record = json.dumps(reduce_recorder, indent=4)
+        for ext in ad:
+            ext.record = record
+
+
+def load_reduce_record(filename):
+    """
+    Load the reduce session from the given save file.
+
+    This call opens a previously saved reduce session from a json file
+    (or from a FITS file with an embedded record) and prepares it for use
+    by the current reduce.
+
+    Parameters
+    ----------
+    filename : str
+        Name of the json file (or recorded FITS file) to read
+    """
+    if filename.endswith('.json'):
+        with open(filename, 'r') as record_file:
+            global replay_record
+            replay_record = json.loads(record_file.read())
+            if replay_record["version"] != __version__:
+                log.warning("This version of DRAGONS ({}) does not match the version for this replay record: {}"
+                            .format(__version__, replay_record["version"]))
+            return replay_record["args"] if replay_record else []
+    else:
+        ad = astrodata.open(filename)
+        return load_reduce_record_from_ad(ad)
+
+
+def load_reduce_record_from_ad(ad):
+    global replay_record
+    record = ad[0].record
+    if record:
+        replay_record = json.loads(record)
+        if replay_record["version"] != __version__:
+            log.warning("This version of DRAGONS ({}) does not match the version for this replay record: {}"
+                        .format(__version__, replay_record["version"]))
+    return replay_record["args"] if replay_record else []
+
+
+def load_replay_interactive_settings(visualizer):
+    """
+    Load the current interactive tool state from the record.
+
+    This call initializes an interactive tool based on the current
+    step in the loaded json file.  Each time an interface is loaded,
+    the system advances to the next saved interactive state to use for
+    the next tool.
+
+    Parameters
+    ----------
+    visualizer : :class:`~geminidr.interactive.PrimitiveVisualizer`
+        visualizer to be initialized
+    """
+    if disable_replay:
+        return
+    global replay_step
+    if replay_record and replay_step < len(replay_record["interactive"]):
+        retval = replay_record["interactive"][replay_step]
+        replay_step += 1
+        visualizer.load(retval)
+    elif replay_record and replay_step >= len(replay_record["interactive"]):
+        log.warning("Request for interactive settings beyond that recorded in the replay file. This replay "
+                    "is probably not compatible with your current DRAGONS install.")
+
+
+def validate_replay_step(record):
+    """
+    Validate the exit state of an interactive step against what was recorded.
+
+    If the interactive step has been modified, disable the replay functionality
+    for all remaining steps.
+
+    Parameters
+    ----------
+    record : dict
+        Dictionary describing the state of the interactive step, as would be
+        saved when recording the session
+    """
+    global replay_step
+    global disable_replay
+    global warned_user
+    if replay_record and replay_step-1 < len(replay_record["interactive"]):
+        retval = replay_record["interactive"][replay_step-1]
+        if retval != record:
+            if not warned_user:
+                log.warning("Interactive settings differ from recorded values, "
+                            "replay turned off for remainder of reduction")
+                warned_user = True
+            disable_replay = True
+    elif replay_record and replay_step >= len(replay_record["interactive"]):
+        log.warning("Request to validate interactive settings beyond that recorded in the replay file. This replay "
+                    "is probably not compatible with your current DRAGONS install.")
diff --git a/recipe_system/utils/reduce_utils.py b/recipe_system/utils/reduce_utils.py
index 627aa112b..8afd1ba8d 100644
--- a/recipe_system/utils/reduce_utils.py
+++ b/recipe_system/utils/reduce_utils.py
@@ -163,6 +163,15 @@ def buildParser(version):
                         help="Load a specific config file, overriding the "
                              "~/.geminidr/rsys.cfg file and the $DRAGONSRC "
                              "environment variable.")
+
+    parser.add_argument("--record", dest='record',
+                        default=None, nargs='*', action=UnitaryArgumentAction,
+                        help="record the reduction process to a json file")
+
+    parser.add_argument("--replay", dest='replay',
+                        default=None, nargs=1, action=UnitaryArgumentAction,
+                        help="replay the reduction process from a json file")
+
     return parser
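Finally, a stand-alone illustration of the gate applied by validate_replay_step(): as soon as one interactive step exits with a record that differs from what was saved, the replay is disabled (with a single warning) for the remainder of the reduction; the values here are invented:

recorded = {"primitive_name": "findSourceApertures",
            "apertures": {"1": {"location": 512.3, "start": 500.1, "end": 524.7}}}
current = dict(recorded, apertures={"1": {"location": 600.0, "start": 588.0, "end": 612.0}})

disable_replay = (current != recorded)   # True here, so in_replay() returns False from now on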