From ae2cba4d2b5ecbe24f6f91e2c27f8784853b12f6 Mon Sep 17 00:00:00 2001 From: Sourcery AI <> Date: Sat, 21 Jan 2023 19:21:18 +0000 Subject: [PATCH] 'Refactored by Sourcery' --- examples/goos/bend90/bend90.py | 33 ++--- examples/goos/grating_1d/grating_1d.py | 35 +++--- examples/goos/optplan_examples/simple_opt.py | 3 +- .../optplan_examples/simple_opt_reloaded.py | 3 +- examples/invdes/grating_coupler/grating.py | 114 +++++++++-------- examples/invdes/wdm2/wdm2.py | 42 +++---- spins/fdfd_solvers/local_matrix_solvers.py | 7 +- spins/fdfd_solvers/maxwell.py | 10 +- spins/fdfd_solvers/waveguide_mode.py | 31 ++--- spins/fdfd_tools/free_space_sources.py | 53 ++++---- spins/fdfd_tools/functional.py | 15 +-- spins/fdfd_tools/grid.py | 12 +- spins/fdfd_tools/operators.py | 71 +++++------ spins/fdfd_tools/solvers.py | 9 +- spins/fdfd_tools/test_grid.py | 13 +- spins/fdfd_tools/waveguide.py | 6 +- spins/fdfd_tools/waveguide_mode.py | 14 +-- spins/gds/gen_gds.py | 19 ++- spins/gds/parse_gds.py | 45 +++---- spins/goos/array.py | 9 +- spins/goos/compat.py | 6 +- spins/goos/flows.py | 28 ++--- spins/goos/generic.py | 8 +- spins/goos/graph_executor.py | 14 +-- spins/goos/grating.py | 31 ++--- spins/goos/material.py | 4 +- spins/goos/math.py | 59 ++++----- spins/goos/optplan/context.py | 14 +-- spins/goos/optplan/optplan.py | 119 +++++++----------- spins/goos/optplan/schema.py | 48 +++---- spins/goos/optplan/schema_optplan.py | 13 +- spins/goos/optplan/schema_types.py | 11 +- spins/goos/optplan/schema_utils.py | 10 +- spins/goos/schema_registry.py | 12 +- spins/goos/shapes.py | 10 -- spins/goos/test_graph_executor.py | 6 +- spins/goos/util.py | 4 +- spins/goos_sim/maxwell/render.py | 8 +- spins/goos_sim/maxwell/simspace.py | 20 ++- spins/goos_sim/maxwell/simulate.py | 55 ++++---- spins/gridlock/direction.py | 19 +-- spins/gridlock/float_raster.py | 21 ++-- spins/gridlock/grid.py | 59 ++++----- spins/gridlock/selection_matrix.py | 77 ++++++------ spins/invdes/optimization/gradient_descent.py | 6 +- spins/invdes/optimization/problems.py | 9 +- spins/invdes/optimization/scipy_optimizer.py | 7 +- .../test_constrained_optimizer.py | 12 +- spins/invdes/parametrization/cubic_utils.py | 31 ++--- .../grating_parametrization.py | 19 ++- .../invdes/parametrization/parametrization.py | 7 +- .../parametrization/test_parametrization.py | 14 ++- spins/invdes/problem/emobjective.py | 33 ++--- spins/invdes/problem/farfield.py | 74 ++++++----- spins/invdes/problem/ffobjective.py | 57 +++------ spins/invdes/problem/graph_executor.py | 14 +-- spins/invdes/problem/grating.py | 30 ++--- spins/invdes/problem/objective.py | 20 +-- spins/invdes/problem/selection_matrix.py | 35 +++--- spins/invdes/problem/simulation.py | 10 +- .../problem/slack_optimization_problem.py | 16 +-- spins/invdes/problem/structure_objectives.py | 28 ++--- spins/invdes/problem/test_emobjective.py | 4 +- spins/invdes/problem/test_graph_executor.py | 6 +- spins/invdes/problem/test_objective.py | 20 +-- spins/invdes/problem_graph/creator_em.py | 26 ++-- spins/invdes/problem_graph/creator_param.py | 10 +- .../problem_graph/functions/poynting.py | 2 +- spins/invdes/problem_graph/grating.py | 17 +-- .../invdes/problem_graph/log_tools/loader.py | 21 ++-- .../problem_graph/log_tools/monitor_spec.py | 24 ++-- .../invdes/problem_graph/log_tools/plotter.py | 37 +++--- spins/invdes/problem_graph/optplan/context.py | 20 ++- spins/invdes/problem_graph/optplan/io.py | 24 ++-- spins/invdes/problem_graph/optplan/optplan.py | 29 +++-- 
.../problem_graph/optplan/schema_function.py | 8 +- .../problem_graph/optplan/test_context.py | 16 +-- spins/invdes/problem_graph/schema_utils.py | 10 +- spins/invdes/problem_graph/simspace.py | 23 ++-- spins/invdes/problem_graph/solver.py | 13 +- spins/invdes/problem_graph/test_optplan.py | 14 ++- spins/invdes/problem_graph/workspace.py | 77 +++++------- 82 files changed, 856 insertions(+), 1167 deletions(-) diff --git a/examples/goos/bend90/bend90.py b/examples/goos/bend90/bend90.py index 85ef959..57ac133 100644 --- a/examples/goos/bend90/bend90.py +++ b/examples/goos/bend90/bend90.py @@ -109,16 +109,18 @@ def make_objective(eps: goos.Shape, stage: str, sim_3d: bool): pml_thickness = [400, 400, 400, 400, 0, 0] sim = maxwell.fdfd_simulation( - name="sim_{}".format(stage), + name=f"sim_{stage}", wavelength=1550, eps=eps, solver_info=solver_info, sources=[ - maxwell.WaveguideModeSource(center=[-1400, 0, 0], - extents=[0, 2500, 1000], - normal=[1, 0, 0], - mode_num=0, - power=1) + maxwell.WaveguideModeSource( + center=[-1400, 0, 0], + extents=[0, 2500, 1000], + normal=[1, 0, 0], + mode_num=0, + power=1, + ) ], simulation_space=maxwell.SimulationSpace( mesh=maxwell.UniformMesh(dx=40), @@ -126,21 +128,24 @@ def make_objective(eps: goos.Shape, stage: str, sim_3d: bool): center=[0, 0, 0], extents=[4000, 4000, sim_z_extent], ), - pml_thickness=pml_thickness), + pml_thickness=pml_thickness, + ), background=goos.material.Material(index=1.0), outputs=[ maxwell.Epsilon(name="eps"), maxwell.ElectricField(name="field"), - maxwell.WaveguideModeOverlap(name="overlap", - center=[0, 1400, 0], - extents=[2500, 0, 1000], - normal=[0, 1, 0], - mode_num=0, - power=1), + maxwell.WaveguideModeOverlap( + name="overlap", + center=[0, 1400, 0], + extents=[2500, 0, 1000], + normal=[0, 1, 0], + mode_num=0, + power=1, + ), ], ) - obj = goos.rename(-goos.abs(sim["overlap"]), name="obj_{}".format(stage)) + obj = goos.rename(-goos.abs(sim["overlap"]), name=f"obj_{stage}") return obj, sim diff --git a/examples/goos/grating_1d/grating_1d.py b/examples/goos/grating_1d/grating_1d.py index a7d3fd3..5f1be4c 100644 --- a/examples/goos/grating_1d/grating_1d.py +++ b/examples/goos/grating_1d/grating_1d.py @@ -204,7 +204,7 @@ def make_objective(eps: goos.Shape, stage: str, params: Options): 2000 + pml_thick * 2) sim = maxwell.fdfd_simulation( - name="sim_{}".format(stage), + name=f"sim_{stage}", wavelength=params.wlen, eps=eps, solver=solver, @@ -212,8 +212,9 @@ def make_objective(eps: goos.Shape, stage: str, params: Options): maxwell.GaussianSource( w0=params.beam_width / 2, center=[ - params.coupler_len / 2, 0, - params.wg_thickness / 2 + params.beam_dist + params.coupler_len / 2, + 0, + params.wg_thickness / 2 + params.beam_dist, ], extents=[params.beam_extents, 0, 0], normal=[0, 0, -1], @@ -221,7 +222,8 @@ def make_objective(eps: goos.Shape, stage: str, params: Options): theta=np.deg2rad(params.source_angle_deg), psi=np.pi / 2, polarization_angle=0, - normalize_by_sim=True) + normalize_by_sim=True, + ) ], simulation_space=maxwell.SimulationSpace( mesh=maxwell.UniformMesh(dx=params.dx), @@ -229,22 +231,25 @@ def make_objective(eps: goos.Shape, stage: str, params: Options): center=[(sim_left_x + sim_right_x) / 2, 0, sim_z_center], extents=[sim_right_x - sim_left_x, 0, sim_z_extent], ), - pml_thickness=[pml_thick, pml_thick, 0, 0, pml_thick, pml_thick]), + pml_thickness=[pml_thick, pml_thick, 0, 0, pml_thick, pml_thick], + ), background=goos.material.Material(index=params.eps_bg), outputs=[ maxwell.Epsilon(name="eps"), 
maxwell.ElectricField(name="field"), - maxwell.WaveguideModeOverlap(name="overlap", - center=[-params.wg_len / 2, 0, 0], - extents=[0, 1000, 2000], - normal=[-1, 0, 0], - mode_num=0, - power=1), + maxwell.WaveguideModeOverlap( + name="overlap", + center=[-params.wg_len / 2, 0, 0], + extents=[0, 1000, 2000], + normal=[-1, 0, 0], + mode_num=0, + power=1, + ), ], ) obj = (1 - goos.abs(sim["overlap"]))**2 - obj = goos.rename(obj, name="obj_{}".format(stage)) + obj = goos.rename(obj, name=f"obj_{stage}") return obj, sim @@ -273,12 +278,10 @@ def visualize(folder: str, step: int): plt.figure() plt.subplot(1, 2, 1) - plt.imshow( - np.abs(data["monitor_data"]["sim_{}.eps".format(stage)][1].squeeze())) + plt.imshow(np.abs(data["monitor_data"][f"sim_{stage}.eps"][1].squeeze())) plt.colorbar() plt.subplot(1, 2, 2) - plt.imshow( - np.abs(data["monitor_data"]["sim_{}.field".format(stage)][1].squeeze())) + plt.imshow(np.abs(data["monitor_data"][f"sim_{stage}.field"][1].squeeze())) plt.colorbar() plt.show() diff --git a/examples/goos/optplan_examples/simple_opt.py b/examples/goos/optplan_examples/simple_opt.py index 29e3005..0834e18 100644 --- a/examples/goos/optplan_examples/simple_opt.py +++ b/examples/goos/optplan_examples/simple_opt.py @@ -27,8 +27,7 @@ def main(save_folder: str): # More efficient to call `eval_nodes` when evaluating multiple nodes # at the same time. x_val, y_val, obj_val = plan.eval_nodes([x, y, obj]) - print("x: {}, y: {}, obj: {}".format(x_val.array, y_val.array, - obj_val.array)) + print(f"x: {x_val.array}, y: {y_val.array}, obj: {obj_val.array}") if __name__ == "__main__": diff --git a/examples/goos/optplan_examples/simple_opt_reloaded.py b/examples/goos/optplan_examples/simple_opt_reloaded.py index 2d74e4f..e231272 100644 --- a/examples/goos/optplan_examples/simple_opt_reloaded.py +++ b/examples/goos/optplan_examples/simple_opt_reloaded.py @@ -21,8 +21,7 @@ def main(save_folder: str, checkpoint_file: str): # Show that we have retrieved the values. x_val, y_val, obj_val = plan.eval_nodes([x, y, obj]) - print("x: {}, y: {}, obj: {}".format(x_val.array, y_val.array, - obj_val.array)) + print(f"x: {x_val.array}, y: {y_val.array}, obj: {obj_val.array}") if __name__ == "__main__": diff --git a/examples/invdes/grating_coupler/grating.py b/examples/invdes/grating_coupler/grating.py index 175313a..fdd7fdd 100644 --- a/examples/invdes/grating_coupler/grating.py +++ b/examples/invdes/grating_coupler/grating.py @@ -307,7 +307,8 @@ def create_objective( ) # Append to monitor list for each wavelength monitor_list.append( - optplan.FieldMonitor(name="mon_eps_" + str(wlen), function=epsilon)) + optplan.FieldMonitor(name=f"mon_eps_{str(wlen)}", function=epsilon) + ) # Add a Gaussian source that is angled at 10 degrees. 
sim = optplan.FdfdSimulation( @@ -329,11 +330,12 @@ def create_objective( ) monitor_list.append( optplan.FieldMonitor( - name="mon_field_" + str(wlen), + name=f"mon_field_{str(wlen)}", function=sim, normal=[0, 1, 0], center=[0, 0, 0], - )) + ) + ) wg_overlap = optplan.WaveguideModeOverlap( center=[-grating_len / 2 - 1000, 0, wg_thickness / 2], @@ -346,7 +348,9 @@ def create_objective( optplan.Overlap(simulation=sim, overlap=wg_overlap))**2 monitor_list.append( optplan.SimpleMonitor( - name="mon_power_" + str(wlen), function=power)) + name=f"mon_power_{str(wlen)}", function=power + ) + ) if not MINIMIZE_BACKREFLECTION: # Spins minimizes the objective function, so to make `power` maximized, @@ -373,7 +377,9 @@ def create_objective( optplan.Overlap(simulation=refl_sim, overlap=wg_overlap))**2 monitor_list.append( optplan.SimpleMonitor( - name="mon_refl_power_" + str(wlen), function=refl_power)) + name=f"mon_refl_power_{str(wlen)}", function=refl_power + ) + ) # We now have two sub-objectives: Maximize transmission and minimize # back-reflection, so we must an objective that defines the appropriate @@ -423,14 +429,11 @@ def create_transformations( Returns: A list of transformations. """ - # Setup empty transformation list. - trans_list = [] - # First do continuous relaxation optimization. cont_param = optplan.PixelParametrization( simulation_space=sim_space, init_method=optplan.UniformInitializer(min_val=0, max_val=1)) - trans_list.append( + trans_list = [ optplan.Transformation( name="opt_cont", parametrization=cont_param, @@ -440,12 +443,14 @@ def create_transformations( monitor_lists=optplan.ScipyOptimizerMonitorList( callback_monitors=monitors, start_monitors=monitors, - end_monitors=monitors), + end_monitors=monitors, + ), optimization_options=optplan.ScipyOptimizerOptions( - maxiter=cont_iters), + maxiter=cont_iters + ), ), - )) - + ) + ] # If true, do another round of continous optimization with a discreteness bias. if DISCRETENESS_PENALTY: # Define parameters necessary to normaize discrete penalty term @@ -496,37 +501,41 @@ def create_transformations( # room later on. disc_param = optplan.GratingParametrization( simulation_space=sim_space, inverted=True) - trans_list.append( - optplan.Transformation( - name="cont_to_disc", - parametrization=disc_param, - transformation=optplan.GratingEdgeFitTransformation( - parametrization=cont_param, - min_feature=cont_to_disc_factor * min_feature))) - - # Discrete optimization. 
- trans_list.append( - optplan.Transformation( - name="opt_disc", - parametrization=disc_param, - transformation=optplan.ScipyOptimizerTransformation( - optimizer="SLSQP", - objective=obj, - constraints_ineq=[ - optplan.GratingFeatureConstraint( - min_feature_size=min_feature, - simulation_space=sim_space, - boundary_constraint_scale=1.0, - ) - ], - monitor_lists=optplan.ScipyOptimizerMonitorList( - callback_monitors=monitors, - start_monitors=monitors, - end_monitors=monitors), - optimization_options=optplan.ScipyOptimizerOptions( - maxiter=disc_iters), + trans_list.extend( + ( + optplan.Transformation( + name="cont_to_disc", + parametrization=disc_param, + transformation=optplan.GratingEdgeFitTransformation( + parametrization=cont_param, + min_feature=cont_to_disc_factor * min_feature, + ), ), - )) + optplan.Transformation( + name="opt_disc", + parametrization=disc_param, + transformation=optplan.ScipyOptimizerTransformation( + optimizer="SLSQP", + objective=obj, + constraints_ineq=[ + optplan.GratingFeatureConstraint( + min_feature_size=min_feature, + simulation_space=sim_space, + boundary_constraint_scale=1.0, + ) + ], + monitor_lists=optplan.ScipyOptimizerMonitorList( + callback_monitors=monitors, + start_monitors=monitors, + end_monitors=monitors, + ), + optimization_options=optplan.ScipyOptimizerOptions( + maxiter=disc_iters + ), + ), + ), + ) + ) return trans_list @@ -561,7 +570,7 @@ def view_opt_quick(save_folder: str) -> None: log_data = pickle.load(fp) for key, data in log_data["monitor_data"].items(): if np.isscalar(data): - print("{}: {}".format(key, data.squeeze())) + print(f"{key}: {data.squeeze()}") def resume_opt(save_folder: str) -> None: @@ -609,14 +618,15 @@ def gen_gds(save_folder: str, grating_len: float, wg_width: float) -> None: coords = np.insert(coords, 0, 0, axis=0) coords = np.insert(coords, -1, grating_len, axis=0) - # `coords` now contains the location of the grating edges. Now draw a - # series of rectangles to represent the grating. - grating_poly = [] - for i in range(0, len(coords), 2): - grating_poly.append( - ((coords[i], -wg_width / 2), (coords[i], wg_width / 2), - (coords[i + 1], wg_width / 2), (coords[i + 1], -wg_width / 2))) - + grating_poly = [ + ( + (coords[i], -wg_width / 2), + (coords[i], wg_width / 2), + (coords[i + 1], wg_width / 2), + (coords[i + 1], -wg_width / 2), + ) + for i in range(0, len(coords), 2) + ] # Save the grating to `grating.gds`. grating = gdspy.Cell("GRATING", exclude_from_current=True) grating.add(gdspy.PolygonSet(grating_poly, 100)) diff --git a/examples/invdes/wdm2/wdm2.py b/examples/invdes/wdm2/wdm2.py index 9878c77..291d1b3 100644 --- a/examples/invdes/wdm2/wdm2.py +++ b/examples/invdes/wdm2/wdm2.py @@ -77,11 +77,7 @@ def create_sim_space(gds_fg: str, gds_bg: str) -> optplan.SimulationSpace: A `SimulationSpace` description. """ mat_oxide = optplan.Material(index=optplan.ComplexNumber(real=1.5)) - if SIM_2D: - device_index = SI_2D_INDEX - else: - device_index = SI_3D_INDEX - + device_index = SI_2D_INDEX if SIM_2D else SI_3D_INDEX mat_stack = optplan.GdsMaterialStack( background=mat_oxide, stack=[ @@ -185,11 +181,12 @@ def create_objective(sim_space: optplan.SimulationSpace # Take a field slice through the z=0 plane to save each iteration. 
monitor_list.append( optplan.FieldMonitor( - name="field{}".format(wlen), + name=f"field{wlen}", function=sim, normal=[0, 0, 1], center=[0, 0, 0], - )) + ) + ) if wlen == 1300: # Only save the permittivity at 1300 nm because the permittivity # at 1550 nm is the same (as a constant permittivity value was @@ -205,15 +202,9 @@ def create_objective(sim_space: optplan.SimulationSpace power = optplan.abs(overlap)**2 power_objs.append(power) - monitor_list.append( - optplan.SimpleMonitor(name="power{}".format(wlen), function=power)) - - # Spins minimizes the objective function, so to make `power` maximized, - # we minimize `1 - power`. - obj = 0 - for power in power_objs: - obj += (1 - power)**2 + monitor_list.append(optplan.SimpleMonitor(name=f"power{wlen}", function=power)) + obj = sum((1 - power)**2 for power in power_objs) monitor_list.append(optplan.SimpleMonitor(name="objective", function=obj)) return obj, monitor_list @@ -266,7 +257,7 @@ def create_transformations( for stage in range(num_stages): trans_list.append( optplan.Transformation( - name="opt_cont{}".format(stage), + name=f"opt_cont{stage}", parametrization=param, transformation=optplan.ScipyOptimizerTransformation( optimizer="L-BFGS-B", @@ -274,23 +265,26 @@ def create_transformations( monitor_lists=optplan.ScipyOptimizerMonitorList( callback_monitors=monitors, start_monitors=monitors, - end_monitors=monitors), + end_monitors=monitors, + ), optimization_options=optplan.ScipyOptimizerOptions( - maxiter=iters), + maxiter=iters + ), ), - )) + ) + ) if stage < num_stages - 1: # Make the structure more discrete. trans_list.append( optplan.Transformation( - name="sigmoid_change{}".format(stage), + name=f"sigmoid_change{stage}", parametrization=param, - # The larger the sigmoid strength value, the more "discrete" - # structure will be. transformation=optplan.CubicParamSigmoidStrength( - value=4 * (stage + 1)), - )) + value=4 * (stage + 1) + ), + ) + ) return trans_list diff --git a/spins/fdfd_solvers/local_matrix_solvers.py b/spins/fdfd_solvers/local_matrix_solvers.py index 7186b06..81cc615 100644 --- a/spins/fdfd_solvers/local_matrix_solvers.py +++ b/spins/fdfd_solvers/local_matrix_solvers.py @@ -75,12 +75,7 @@ def solve(self, x = self.solve_matrix_equation(A.astype(np.complex128).tocsr(), b) - if adjoint: - x0 = Pl.H @ x - else: - x0 = Pr @ x - - return x0 + return Pl.H @ x if adjoint else Pr @ x def _worker_simulate(A, b, solver): diff --git a/spins/fdfd_solvers/maxwell.py b/spins/fdfd_solvers/maxwell.py index 567787d..5814374 100644 --- a/spins/fdfd_solvers/maxwell.py +++ b/spins/fdfd_solvers/maxwell.py @@ -69,11 +69,9 @@ def write_field(filename_prefix: str, field): """ xyz = 'xyz' for k in range(3): - file_prefix = filename_prefix + '_' + xyz[k] - write_to_hd5(file_prefix + 'r', 'data', - np.real(field[k]).astype(np.float64)) - write_to_hd5(file_prefix + 'i', 'data', - np.imag(field[k]).astype(np.float64)) + file_prefix = f'{filename_prefix}_{xyz[k]}' + write_to_hd5(f'{file_prefix}r', 'data', np.real(field[k]).astype(np.float64)) + write_to_hd5(f'{file_prefix}i', 'data', np.imag(field[k]).astype(np.float64)) class MaxwellSolver: @@ -98,7 +96,7 @@ def __init__(self, """ # If there is no port specified for server, append the default. 
if ':' not in server: - server += ':' + str(MaxwellSolver.DEFAULT_MAXWELL_SERVER_PORT) + server += f':{str(MaxwellSolver.DEFAULT_MAXWELL_SERVER_PORT)}' self.shape = shape self.server = server self.err_thresh = err_thresh diff --git a/spins/fdfd_solvers/waveguide_mode.py b/spins/fdfd_solvers/waveguide_mode.py index da605ec..7e3d348 100644 --- a/spins/fdfd_solvers/waveguide_mode.py +++ b/spins/fdfd_solvers/waveguide_mode.py @@ -99,14 +99,12 @@ def solve_waveguide_mode_2d( wavenumber = np.sin(np.real(wavenumber * dx / 2)) / (dx / 2) + np.imag(wavenumber) shape = [d.size for d in dxes[0]] - fields = { + return { 'wavenumber': wavenumber, 'E': unvec(e, shape), 'H': unvec(h, shape), } - return fields - def solve_waveguide_mode_3d( mode_number: int, @@ -349,14 +347,12 @@ def solve_waveguide_mode( H[a][tuple(slices)] = fields_2d['H'][o][:, :, None].transpose( reverse_order) - results = { + return { 'wavenumber': fields_2d['wavenumber'], 'H': H, 'E': E, } - return results - def compute_source( E: field_t, @@ -392,8 +388,7 @@ def compute_source( M = [None] * 3 src_order = np.roll(range(3), -axis) - exp_iphi = np.exp( - 1j * polarity * wavenumber * dxes[1][int(axis)][slices[int(axis)]]) + exp_iphi = np.exp(1j * polarity * wavenumber * dxes[1][axis][slices[axis]]) J[src_order[0]] = np.zeros_like(E[0]) J[src_order[1]] = +exp_iphi * H[src_order[2]] * polarity J[src_order[2]] = -exp_iphi * H[src_order[1]] * polarity @@ -408,7 +403,7 @@ def compute_source( for k in range(3): J[k] += Jm_iw[k] / (-1j * omega) - return J / dxes[1][int(axis)][slices[int(axis)]] + return J / dxes[1][axis][slices[axis]] def compute_source_angle( @@ -439,15 +434,15 @@ def compute_source_angle( :param mu: Magnetic permeability (default 1 everywhere) :return: J distribution for the unidirectional source """ - if polarity == 1: - Mslice = copy.deepcopy(slices) - Jslice = copy.deepcopy(slices) - Jslice[axis] = slice(Jslice[axis].start + 1, Jslice[axis].stop + 1) - elif polarity == -1: + if polarity == -1: Mslice = copy.deepcopy(slices) Mslice[axis] = slice(Mslice[axis].start - 1, Mslice[axis].stop - 1) Jslice = copy.deepcopy(slices) + elif polarity == 1: + Mslice = copy.deepcopy(slices) + Jslice = copy.deepcopy(slices) + Jslice[axis] = slice(Jslice[axis].start + 1, Jslice[axis].stop + 1) if mu is None: mu = np.ones_like(eps) @@ -530,7 +525,7 @@ def compute_overlap_e( npts = E[0].size dn = np.zeros(npts * 3, dtype=int) - dn[0:npts] = 1 + dn[:npts] = 1 dn = np.roll(dn, npts * axis) e2h = operators.e2h(omega, dxes, mu) @@ -744,9 +739,7 @@ def build_waveguide_source(omega: complex, dxes: List[np.ndarray], for k in range(len(J)): J[k] *= np.sqrt(power) - if get_wavenumber: - return J, wgmode_result['wavenumber'] - return J + return (J, wgmode_result['wavenumber']) if get_wavenumber else J def build_waveguide_source_angle(omega: complex, @@ -960,7 +953,7 @@ def build_overlap(omega: complex, dxes: List[np.ndarray], eps: List[np.ndarray], axis = axis.value if np.abs(polarity) != 1: - raise ValueError("Polarity should be +/- 1, got {}".format(polarity)) + raise ValueError(f"Polarity should be +/- 1, got {polarity}") sim_params = { 'omega': omega, diff --git a/spins/fdfd_tools/free_space_sources.py b/spins/fdfd_tools/free_space_sources.py index 04d1911..54a614b 100644 --- a/spins/fdfd_tools/free_space_sources.py +++ b/spins/fdfd_tools/free_space_sources.py @@ -18,23 +18,25 @@ def rotation_matrix(vec: np.ndarray, angle: float) -> np.ndarray: """Matrix rotates around the vector. 
""" - R = np.array([[ - COS(angle) + vec[0]**2 * (1 - COS(angle)), - vec[0] * vec[1] * (1 - COS(angle)) - vec[2] * SIN(angle), - vec[0] * vec[2] * (1 - COS(angle)) + vec[1] * SIN(angle) - ], - [ - vec[0] * vec[1] * (1 - COS(angle)) + vec[2] * SIN(angle), - COS(angle) + vec[1]**2 * (1 - COS(angle)), - vec[1] * vec[2] * (1 - COS(angle)) - vec[0] * SIN(angle) - ], - [ - vec[0] * vec[2] * (1 - COS(angle)) - vec[1] * SIN(angle), - vec[1] * vec[2] * (1 - COS(angle)) + vec[0] * SIN(angle), - COS(angle) + vec[2]**2 * (1 - COS(angle)) - ]]) - - return R + return np.array( + [ + [ + COS(angle) + vec[0] ** 2 * (1 - COS(angle)), + vec[0] * vec[1] * (1 - COS(angle)) - vec[2] * SIN(angle), + vec[0] * vec[2] * (1 - COS(angle)) + vec[1] * SIN(angle), + ], + [ + vec[0] * vec[1] * (1 - COS(angle)) + vec[2] * SIN(angle), + COS(angle) + vec[1] ** 2 * (1 - COS(angle)), + vec[1] * vec[2] * (1 - COS(angle)) - vec[0] * SIN(angle), + ], + [ + vec[0] * vec[2] * (1 - COS(angle)) - vec[1] * SIN(angle), + vec[1] * vec[2] * (1 - COS(angle)) + vec[0] * SIN(angle), + COS(angle) + vec[2] ** 2 * (1 - COS(angle)), + ], + ] + ) def gaussian_beam_z_axis_x_pol(x_grid, y_grid, z_grid, w0, center, R, omega, @@ -184,21 +186,21 @@ def scalar2rotated_vector_fields(eps_grid: Grid, E[a] = np.sqrt(power) * E_fields[o].transpose(reverse_order) H[a] = np.sqrt(power) * H_fields[o].transpose(reverse_order) else: - E[a][tuple(slices)] = np.sqrt(power) * E_fields[o][tuple( - [slices[i] for i in order])].transpose(reverse_order) - H[a][tuple(slices)] = np.sqrt(power) * H_fields[o][tuple( - [slices[i] for i in order])].transpose(reverse_order) + E[a][tuple(slices)] = np.sqrt(power) * E_fields[o][ + tuple(slices[i] for i in order) + ].transpose(reverse_order) + H[a][tuple(slices)] = np.sqrt(power) * H_fields[o][ + tuple(slices[i] for i in order) + ].transpose(reverse_order) wavevector[a] = bloch_vector[o] - results = { + return { 'wavevector': wavevector, 'H': H, 'E': E, } - return results - def build_plane_wave_source(eps_grid: Grid, omega: float, @@ -427,11 +429,10 @@ def build_gaussian_source(eps_grid: Grid, if polarity == -1: ind = slices[axis].start field_slices[axis] = slice(None, ind) - J_slices[axis] = slice(ind - 1, ind + 1) else: ind = slices[axis].stop - 1 field_slices[axis] = slice(ind, None) - J_slices[axis] = slice(ind - 1, ind + 1) + J_slices[axis] = slice(ind - 1, ind + 1) E = np.zeros_like(fields['E']) for i in range(3): E[i][tuple(field_slices)] = fields['E'][i][tuple(field_slices)] diff --git a/spins/fdfd_tools/functional.py b/spins/fdfd_tools/functional.py index c0fd2d9..65707e8 100644 --- a/spins/fdfd_tools/functional.py +++ b/spins/fdfd_tools/functional.py @@ -87,10 +87,7 @@ def op_mu(e): curls = ch([m * y for m, y in zip(mu, ce(e))]) return [c - omega**2 * p * x for c, p, x in zip(curls, epsilon, e)] - if np.any(np.equal(mu, None)): - return op_1 - else: - return op_mu + return op_1 if np.any(np.equal(mu, None)) else op_mu def eh_full(omega: complex, @@ -117,10 +114,7 @@ def op_mu(e, h): return ([c - 1j * omega * p * x for c, p, x in zip(ch(h), epsilon, e)], [c + 1j * omega * m * y for c, m, y in zip(ce(e), mu, h)]) - if np.any(np.equal(mu, None)): - return op_1 - else: - return op_mu + return op_1 if np.any(np.equal(mu, None)) else op_mu def e2h( @@ -145,7 +139,4 @@ def e2h_1_1(e): def e2h_mu(e): return [y / (-1j * omega * m) for y, m in zip(A2(e), mu)] - if np.any(np.equal(mu, None)): - return e2h_1_1 - else: - return e2h_mu + return e2h_1_1 if np.any(np.equal(mu, None)) else e2h_mu diff --git 
a/spins/fdfd_tools/grid.py b/spins/fdfd_tools/grid.py index f769a1f..e6f964a 100644 --- a/spins/fdfd_tools/grid.py +++ b/spins/fdfd_tools/grid.py @@ -140,11 +140,7 @@ def l_d(x): def l_d(x): return (x - bound) / (pos[-1] - bound) - if thickness == 0: - slc = slice(None) - else: - slc = slice(-thickness, None) - + slc = slice(None) if thickness == 0 else slice(-thickness, None) dx_ai[slc] *= 1 + 1j * s_function(l_d(pos_a[slc])) / d / s_correction dx_bi[slc] *= 1 + 1j * s_function(l_d(pos_b[slc])) / d / s_correction @@ -253,15 +249,15 @@ def MeshBox(x, x0, xn, DX, grad_Mesh): # size give at that point xs = [SimBorders[0]] while xs[-1] < SimBorders[1]: - xs = xs + [xs[-1] + dxv[int((xs[-1] - x[0]) // step)]] + xs += [xs[-1] + dxv[int((xs[-1] - x[0]) // step)]] xs = (xs / np.max(xs) * SimBorders[1]) ys = [SimBorders[2]] while ys[-1] < SimBorders[3]: - ys = ys + [ys[-1] + dyv[int((ys[-1] - y[0]) // step)]] + ys += [ys[-1] + dyv[int((ys[-1] - y[0]) // step)]] ys = (ys / np.max(ys) * SimBorders[3]) zs = [SimBorders[4]] while zs[-1] < SimBorders[5]: - zs = zs + [zs[-1] + dzv[int((zs[-1] - z[0]) // step)]] + zs += [zs[-1] + dzv[int((zs[-1] - z[0]) // step)]] zs = (zs / np.max(zs) * SimBorders[5]) # return results diff --git a/spins/fdfd_tools/operators.py b/spins/fdfd_tools/operators.py index ffedcb6..c5a21c8 100644 --- a/spins/fdfd_tools/operators.py +++ b/spins/fdfd_tools/operators.py @@ -94,8 +94,7 @@ def e_full(omega: complex, else: m_div = sparse.diags(1 / mu) - op = pe @ (ch @ pm @ m_div @ ce - omega**2 * e) @ pe - return op + return pe @ (ch @ pm @ m_div @ ce - omega**2 * e) @ pe def e_full_preconditioners( @@ -171,13 +170,8 @@ def h_full(omega: complex, pm = sparse.diags(np.where(pmc, 0, 1)) # Set pe to (not PMC) e_div = sparse.diags(1 / epsilon) - if mu is None: - m = sparse.eye(epsilon.size) - else: - m = sparse.diags(mu) - - A = pm @ (ec @ pe @ e_div @ hc - omega**2 * m) @ pm - return A + m = sparse.eye(epsilon.size) if mu is None else sparse.diags(mu) + return pm @ (ec @ pe @ e_div @ hc - omega**2 * m) @ pm def eh_full(omega: complex, @@ -234,8 +228,7 @@ def eh_full(omega: complex, A1 = pe @ curl_h(dxes, bloch_vec, shift_orthogonal) @ pm A2 = pm @ curl_e(dxes, bloch_vec, shift_orthogonal) @ pe - A = sparse.bmat([[-iwe, A1], [A2, iwm]]) - return A + return sparse.bmat([[-iwe, A1], [A2, iwm]]) def curl_h(dxes: dx_lists_t, @@ -336,14 +329,9 @@ def h2e(omega: complex, if bloch_vec is None: bloch_vec = np.zeros(3) - op = sparse.diags(1 / (1j * omega * eps)) @ curl_h(dxes, bloch_vec, - shift_orthogonal) - - #TODO: implement pmc - #if not np.any(np.equal(pmc, None)): - # op = sparse.diags(np.where(pmc, 0, 1)) @ op - - return op + return sparse.diags(1 / (1j * omega * eps)) @ curl_h( + dxes, bloch_vec, shift_orthogonal + ) def m2j(omega: complex, @@ -438,9 +426,9 @@ def vec_cross(b: vfield_t) -> sparse.spmatrix: if len(b.shape) == 1: n = b.shape[0] // 3 B = [ - sparse.diags(b[0:n]), - sparse.diags(b[n:2 * n]), - sparse.diags(b[2 * n:3 * n]) + sparse.diags(b[:n]), + sparse.diags(b[n : 2 * n]), + sparse.diags(b[2 * n : 3 * n]), ] elif b.shape[1] == 3: B = [sparse.diags(c) for c in np.split(b, 3)] @@ -456,7 +444,7 @@ def avgf(axis: int, shape: List[int]) -> sparse.spmatrix: :return: Sparse matrix for forward average operation """ if len(shape) not in (2, 3): - raise Exception('Invalid shape: {}'.format(shape)) + raise Exception(f'Invalid shape: {shape}') n = np.prod(shape) return 0.5 * (sparse.eye(n) + rotation_bloch_shift(axis, shape, 1)) @@ -471,7 +459,7 @@ def avgb(axis: int, shape: List[int]) 
-> sparse.spmatrix: :return: Sparse matrix for backward average operation """ if len(shape) not in (2, 3): - raise Exception('Invalid shape: {}'.format(shape)) + raise Exception(f'Invalid shape: {shape}') n = np.prod(shape) return 0.5 * (sparse.eye(n) + rotation_bloch_shift(axis, shape, -1)) @@ -492,10 +480,9 @@ def rotation_bloch(axis: int, :return sparse matrix for performing the circular shift ''' if len(shape) not in (2, 3): - raise Exception('Invalid shape: {}'.format(shape)) + raise Exception(f'Invalid shape: {shape}') if axis not in range(len(shape)): - raise Exception('Invalid direction: {}, shape is {}'.format( - axis, shape)) + raise Exception(f'Invalid direction: {axis}, shape is {shape}') if shift_distance not in (-1, 1): raise Exception('Shift must be in (-1,1)') @@ -557,7 +544,7 @@ def deriv_forward(dx_e: List[np.ndarray], shape = [s.size for s in dx_e] n = np.prod(shape) - phase = [bloch_vec[n] * np.sum(dx_e[n]) for n in range(0, len(dx_e))] + phase = [bloch_vec[n] * np.sum(dx_e[n]) for n in range(len(dx_e))] dx_e_expanded = np.meshgrid(*dx_e, indexing='ij') def deriv(axis, phase): @@ -586,7 +573,7 @@ def deriv_back(dx_h: List[np.ndarray], shape = [s.size for s in dx_h] n = np.prod(shape) - phase = [bloch_vec[n] * np.sum(dx_h[n]) for n in range(0, len(dx_h))] + phase = [bloch_vec[n] * np.sum(dx_h[n]) for n in range(len(dx_h))] dx_h_expanded = np.meshgrid(*dx_h, indexing='ij') def deriv(axis, phase): @@ -649,9 +636,7 @@ def rotation_bloch_shift(axis: int, ind0, shape, mode='wrap', order='F').flatten(order='F') col_ind = np.ravel_multi_index( ind, shape, mode='wrap', order='F').flatten(order='F') - A = sparse.csr_matrix((data.flatten(order='F'), (row_ind, col_ind))) - - return A + return sparse.csr_matrix((data.flatten(order='F'), (row_ind, col_ind))) def deriv_forward_shift(dx_e: List[np.ndarray], @@ -761,10 +746,13 @@ def poynting_e_cross(e: vfield_t, dxes: dx_lists_t) -> sparse.spmatrix: n = np.prod(shape) zero = sparse.csr_matrix((n, n)) - P = sparse.bmat([[zero, -fx @ Ez @ bz @ dbgy, fx @ Ey @ by @ dbgz], - [fy @ Ez @ bz @ dbgx, zero, -fy @ Ex @ bx @ dbgz], - [-fz @ Ey @ by @ dbgx, fz @ Ex @ bx @ dbgy, zero]]) - return P + return sparse.bmat( + [ + [zero, -fx @ Ez @ bz @ dbgy, fx @ Ey @ by @ dbgz], + [fy @ Ez @ bz @ dbgx, zero, -fy @ Ex @ bx @ dbgz], + [-fz @ Ey @ by @ dbgx, fz @ Ex @ bx @ dbgy, zero], + ] + ) def poynting_h_cross(h: vfield_t, dxes: dx_lists_t) -> sparse.spmatrix: @@ -793,10 +781,13 @@ def poynting_h_cross(h: vfield_t, dxes: dx_lists_t) -> sparse.spmatrix: n = np.prod(shape) zero = sparse.csr_matrix((n, n)) - P = sparse.bmat([[zero, -by @ Hz @ fx @ dagy, bz @ Hy @ fx @ dagz], - [bx @ Hz @ fy @ dagx, zero, -bz @ Hx @ fy @ dagz], - [-bx @ Hy @ fz @ dagx, by @ Hx @ fz @ dagy, zero]]) - return P + return sparse.bmat( + [ + [zero, -by @ Hz @ fx @ dagy, bz @ Hy @ fx @ dagz], + [bx @ Hz @ fy @ dagx, zero, -bz @ Hx @ fy @ dagz], + [-bx @ Hy @ fz @ dagx, by @ Hx @ fz @ dagy, zero], + ] + ) def poynting_chew_e_cross(efield: np.ndarray, diff --git a/spins/fdfd_tools/solvers.py b/spins/fdfd_tools/solvers.py index 4bd1361..513a84f 100644 --- a/spins/fdfd_tools/solvers.py +++ b/spins/fdfd_tools/solvers.py @@ -88,7 +88,7 @@ def generic( """ if matrix_solver_opts is None: - matrix_solver_opts = dict() + matrix_solver_opts = {} b0 = -1j * omega * J A0 = operators.e_full(omega, dxes, epsilon=epsilon, mu=mu, pec=pec, pmc=pmc) @@ -104,9 +104,4 @@ def generic( x = matrix_solver(A.tocsr(), b, **matrix_solver_opts) - if adjoint: - x0 = Pl.H @ x - else: - x0 = Pr @ x - - return 
x0 + return Pl.H @ x if adjoint else Pr @ x diff --git a/spins/fdfd_tools/test_grid.py b/spins/fdfd_tools/test_grid.py index a194cea..7bca825 100644 --- a/spins/fdfd_tools/test_grid.py +++ b/spins/fdfd_tools/test_grid.py @@ -50,11 +50,14 @@ def test_make_nonuniform_grid(self): def make_dxes(): dx = 2 shape = [3, 3, 3] - return [[ - np.array([dx] * shape[0]), - np.array([dx] * shape[1]), - np.array([dx] * shape[2]), - ] for i in range(2)] + return [ + [ + np.array([dx] * shape[0]), + np.array([dx] * shape[1]), + np.array([dx] * shape[2]), + ] + for _ in range(2) + ] def test_apply_scpml_no_pmls(): diff --git a/spins/fdfd_tools/waveguide.py b/spins/fdfd_tools/waveguide.py index 819308b..1153bff 100644 --- a/spins/fdfd_tools/waveguide.py +++ b/spins/fdfd_tools/waveguide.py @@ -115,8 +115,7 @@ def normalized_fields(v: np.ndarray, S = 0.5 * ( (S1 + np.roll(S1, 1, axis=0)) - (S2 + np.roll(S2, 1, axis=1))) P = 0.5 * np.real(S.sum()) - assert P > 0, 'Found a mode propagating in the wrong direction! P={}'.format( - P) + assert P > 0, f'Found a mode propagating in the wrong direction! P={P}' norm_amplitude = 1 / np.sqrt(P) norm_angle = -np.angle(e[e.size // 2]) @@ -210,8 +209,7 @@ def h2e(wavenumber: complex, omega: complex, dxes: dx_lists_t, :param epsilon: Vectorized dielectric constant grid :return: Sparse matrix representation of the operator """ - op = sparse.diags(1 / (1j * omega * epsilon)) @ curl_h(wavenumber, dxes) - return op + return sparse.diags(1 / (1j * omega * epsilon)) @ curl_h(wavenumber, dxes) def curl_e(wavenumber: complex, dxes: dx_lists_t) -> sparse.spmatrix: diff --git a/spins/fdfd_tools/waveguide_mode.py b/spins/fdfd_tools/waveguide_mode.py index 205d9fb..dc73754 100644 --- a/spins/fdfd_tools/waveguide_mode.py +++ b/spins/fdfd_tools/waveguide_mode.py @@ -95,14 +95,12 @@ def solve_waveguide_mode_2d(mode_number: int, wavenumber -= 2 * np.sin(np.real(wavenumber / 2)) - np.real(wavenumber) shape = [d.size for d in dxes[0]] - fields = { + return { 'wavenumber': wavenumber, 'E': unvec(e, shape), 'H': unvec(h, shape), } - return fields - def solve_waveguide_mode(mode_number: int, omega: complex, @@ -177,14 +175,12 @@ def solve_waveguide_mode(mode_number: int, E[a][slices] = fields_2d['E'][o][:, :, None].transpose(reverse_order) H[a][slices] = fields_2d['H'][o][:, :, None].transpose(reverse_order) - results = { + return { 'wavenumber': fields_2d['wavenumber'], 'H': H, 'E': E, } - return results - def compute_source(E: field_t, H: field_t, @@ -219,7 +215,7 @@ def compute_source(E: field_t, M = [None]*3 src_order = np.roll(range(3), -axis) - exp_iphi = np.exp(1j * polarity * wavenumber * dxes[1][int(axis)][slices[int(axis)]]) + exp_iphi = np.exp(1j * polarity * wavenumber * dxes[1][axis][slices[axis]]) J[src_order[0]] = np.zeros_like(E[0]) J[src_order[1]] = +exp_iphi * H[src_order[2]] * polarity J[src_order[2]] = -exp_iphi * H[src_order[1]] * polarity @@ -234,7 +230,7 @@ def compute_source(E: field_t, for k in range(3): J[k] += Jm_iw[k] / (-1j * omega) - return J / dxes[1][int(axis)][slices[int(axis)]] + return J / dxes[1][axis][slices[axis]] def compute_overlap_e(E: field_t, @@ -292,7 +288,7 @@ def compute_overlap_e(E: field_t, npts = E[0].size dn = np.zeros(npts * 3, dtype=int) - dn[0:npts] = 1 + dn[:npts] = 1 dn = np.roll(dn, npts * axis) e2h = operators.e2h(omega, dxes, mu) diff --git a/spins/gds/gen_gds.py b/spins/gds/gen_gds.py index 3a2def7..8e9106d 100644 --- a/spins/gds/gen_gds.py +++ b/spins/gds/gen_gds.py @@ -40,10 +40,9 @@ def gen_gds(poly_coords: List[np.ndarray], if 
deembed:
-        containment_mx = []
-        for polygon in gds_polygons:
-            containment_mx.append(gdspy.inside(test_points, [polygon]))
-
+        containment_mx = [
+            gdspy.inside(test_points, [polygon]) for polygon in gds_polygons
+        ]
         # Subtract by identity matrix since polygon_i trivially contains
         # polygon_i.
         containment_mx = np.array(containment_mx) - np.eye(len(gds_polygons))
@@ -73,9 +72,7 @@ def gen_gds(poly_coords: List[np.ndarray],
         for i in target_polys[::-1]:
             containment_list = np.nonzero(containment_mx[i, :])[0]
             # Concatenate all the contained polygons to subtract out.
-            poly_list = sum([
-                gds_polygons[j].polygons for j in containment_list
-            ], [])
+            poly_list = sum((gds_polygons[j].polygons for j in containment_list), [])
             # Note that we had to turn precision from the default 1e-3
             # to 1e-6 to avoid errors in the NOT operation.
             gds_polygons[i] = gdspy.boolean(gds_polygons[i],
@@ -92,10 +89,10 @@ def gen_gds(poly_coords: List[np.ndarray],
                     del gds_polygons[k]
                     del test_points[k]
 
-            containment_mx = []
-            for polygon in gds_polygons:
-                containment_mx.append(gdspy.inside(test_points, [polygon]))
-
+            containment_mx = [
+                gdspy.inside(test_points, [polygon])
+                for polygon in gds_polygons
+            ]
             containment_mx = np.array(containment_mx) - np.eye(
                 len(gds_polygons))
             overlap_list = np.sum(containment_mx, axis=1)
diff --git a/spins/gds/parse_gds.py b/spins/gds/parse_gds.py
index 1298c0c..0cd99e5 100644
--- a/spins/gds/parse_gds.py
+++ b/spins/gds/parse_gds.py
@@ -103,25 +103,20 @@ def _get_top_level_cell(self, cell_name) -> gdspy.Cell:
         if cell_name is None:
             max_num_polys = 0
             for cell in top_level_cells:
-                # Ignore cells that are hidden
                 if cell.name.startswith("$$$"):
                     continue
-                else:
-                    num_polys = 0
-                    cell.flatten()
-
-                    polygon_sets = cell.get_polygonsets()
-                    for polygon_set in polygon_sets:
-                        num_polys += len(polygon_set.polygons)
-
-                    if max_num_polys == num_polys:
-                        raise ValueError(
-                            "Multiple top level cells with same number of "
-                            "polygons. Unable to uniquely identify top level"
-                            " cell. Please specify cell by name instead.")
-                    if max_num_polys < num_polys:
-                        max_num_polys = num_polys
-                        max_poly_tlc = cell
+                cell.flatten()
+
+                polygon_sets = cell.get_polygonsets()
+                num_polys = sum(len(polygon_set.polygons) for polygon_set in polygon_sets)
+                if max_num_polys == num_polys:
+                    raise ValueError(
+                        "Multiple top level cells with same number of "
+                        "polygons. Unable to uniquely identify top level"
+                        " cell. Please specify cell by name instead.")
+                if max_num_polys < num_polys:
+                    max_num_polys = num_polys
+                    max_poly_tlc = cell
 
             if max_num_polys > 0:
                 return max_poly_tlc
@@ -132,7 +127,7 @@ def _get_top_level_cell(self, cell_name) -> gdspy.Cell:
         for cell in top_level_cells:
             if cell.name == cell_name:
                 return cell.flatten()
-        raise ValueError("Cell name not found, got {}.".format(cell_name))
+        raise ValueError(f"Cell name not found, got {cell_name}.")
 
     def get_bounding_box(self, polygon_coords):
         """Returns a NamedTuple which describes the bounding box of a polygon.
@@ -160,9 +155,7 @@ def get_bounding_box(self, polygon_coords):
         y_max = max(np_poly_coords[:, 1]) * 1000
         y_min = min(np_poly_coords[:, 1]) * 1000
 
-        box = Box(x_minmax=[x_min, x_max], y_minmax=[y_min, y_max])
-
-        return box
+        return Box(x_minmax=[x_min, x_max], y_minmax=[y_min, y_max])
 
     def get_bounding_boxes(self, layer):
        """Returns a list containing the bounding box for polygons in layer.
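The gen_gds.py and parse_gds.py hunks above and below all apply the same Sourcery rewrite: an accumulator loop becomes a list comprehension, or a running total becomes a `sum` over a generator. A minimal sketch of why the rewrite is safe, using a toy `inside` predicate as a stand-in for `gdspy.inside` so it runs without gdspy installed:

# Toy equivalence check: the append-loop and the comprehension build the
# same containment matrix. `inside` is a hypothetical stand-in, not gdspy.
def inside(points, interval):
    return [interval[0] <= p <= interval[1] for p in points]

test_points = [0.5, 1.5, 2.5]
polygons = [(0.0, 1.0), (1.0, 2.0), (0.0, 3.0)]

containment_loop = []
for polygon in polygons:
    containment_loop.append(inside(test_points, polygon))

containment_comp = [inside(test_points, polygon) for polygon in polygons]
assert containment_loop == containment_comp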
@@ -178,14 +171,10 @@ def get_bounding_boxes(self, layer): """ - boxes = [] if layer not in self.layers: return [] - for polygon in self.layers[layer]: - boxes.append(self.get_bounding_box(polygon)) - - return boxes + return [self.get_bounding_box(polygon) for polygon in self.layers[layer]] def get_polygons(self, layer): """Returns a list of all polygons in the specified layer. @@ -202,6 +191,4 @@ def get_polygons(self, layer): Raises: ValueError: If layer cannot be found. """ - if layer not in self.layers: - return [] - return self.layers[layer] + return [] if layer not in self.layers else self.layers[layer] diff --git a/spins/goos/array.py b/spins/goos/array.py index 8177c6b..a0f5c7d 100644 --- a/spins/goos/array.py +++ b/spins/goos/array.py @@ -24,7 +24,7 @@ def set_all(self, value: bool) -> None: flow.set_all(value) def __bool__(self): - return all(flow for flow in self.flow_flags) + return all(self.flow_flags) @dataclasses.dataclass class Grad(flows.Flow.Grad): @@ -91,7 +91,7 @@ def __eq__(self, value) -> bool: return self._flows == value._flows def __repr__(self): - return "ArrayFlow({})".format(self._flows) + return f"ArrayFlow({self._flows})" class ArrayFlowOpMixin: @@ -134,8 +134,7 @@ def __init__(self, if not name: continue if name in names: - raise ValueError( - "Duplicate flow name found, got {}".format(name)) + raise ValueError(f"Duplicate flow name found, got {name}") names.add(name) self._flow_names = flow_names @@ -170,7 +169,7 @@ def __getitem__(self, ind: Union[int, str]) -> goos.ProblemGraphNode: # We prepend now instead of the constructor because `_goos_name` # is not set until after object construction. if self._prepend_parent_name and flow_name: - flow_name = self._goos_name + "." + flow_name + flow_name = f"{self._goos_name}.{flow_name}" self._cast_objs[ind] = generic.cast(IndexOp(self, ind), self._flow_types[ind], name=flow_name) diff --git a/spins/goos/compat.py b/spins/goos/compat.py index a987081..b06de77 100644 --- a/spins/goos/compat.py +++ b/spins/goos/compat.py @@ -137,8 +137,7 @@ def create_old_param( elif type(param) == optplan.BicubicLevelSetParametrization: return create_bicubic_levelset_param(param, extents, pixel_size) else: - raise ValueError("Cannot create parametrization for type {}".format( - type(param))) + raise ValueError(f"Cannot create parametrization for type {type(param)}") def create_spins_param(params: Union[optplan.CubicParametrization, @@ -220,8 +219,7 @@ def create_spins_param(params: Union[optplan.CubicParametrization, elif params.type == "parametrization.cubic": param_class = parametrization.CubicParam else: - raise ValueError("Unexpected parametrization type, got {}".format( - params.type)) + raise ValueError(f"Unexpected parametrization type, got {params.type}") return param_class(initial_value=init_val, coarse_x=coarse_x, diff --git a/spins/goos/flows.py b/spins/goos/flows.py index 748f412..087c879 100644 --- a/spins/goos/flows.py +++ b/spins/goos/flows.py @@ -71,8 +71,9 @@ def __new__(meta, name, bases, class_dict): const_flag_fields = [(field.name, bool, dataclasses.field(default=False)) for field in nonconst_fields] - cls.ConstFlags = dataclasses.make_dataclass(name + ".ConstFlags", - const_flag_fields) + cls.ConstFlags = dataclasses.make_dataclass( + f"{name}.ConstFlags", const_flag_fields + ) def __bool__(self) -> bool: return all( @@ -87,13 +88,15 @@ def set_all(self, value: bool) -> None: # Create the gradient class. 
if "Grad" not in cls.__dict__: - grad_fields = [(field.name + "_grad", field.type, np_zero_field(1)) - for field in nonconst_fields] + grad_fields = [ + (f"{field.name}_grad", field.type, np_zero_field(1)) + for field in nonconst_fields + ] grad_bases = tuple(cls_base.Grad for cls_base in bases) - cls.Grad = dataclasses.make_dataclass(name + ".Grad", - grad_fields, - bases=grad_bases) + cls.Grad = dataclasses.make_dataclass( + f"{name}.Grad", grad_fields, bases=grad_bases + ) def __iadd__(self, value): for field in grad_fields: @@ -168,8 +171,7 @@ def __eq__(self, other: "Flow") -> bool: return True if self.__class__ != other.__class__: - raise NotImplemented( - "Cannot compare flow types, got {} and {}".format(self, other)) + raise NotImplemented(f"Cannot compare flow types, got {self} and {other}") for val1, val2 in zip(dataclasses.astuple(self), dataclasses.astuple(other)): @@ -204,9 +206,7 @@ def __iadd__(self, value): def __eq__(self, value) -> bool: if type(self) == NumericFlow.Grad: - if isinstance(value, numbers.Number): - return np.all(self.array_grad == value) - elif isinstance(value, np.ndarray): + if isinstance(value, (numbers.Number, np.ndarray)): return np.all(self.array_grad == value) elif type(value) == NumericFlow.Grad: return np.all(self.array_grad == value.array_grad) @@ -214,9 +214,7 @@ def __eq__(self, value) -> bool: def __eq__(self, value) -> bool: if type(self) == NumericFlow: - if isinstance(value, numbers.Number): - return np.all(self.array == value) - elif isinstance(value, np.ndarray): + if isinstance(value, (numbers.Number, np.ndarray)): return np.all(self.array == value) elif type(value) == NumericFlow: return np.all(self.array == value.array) diff --git a/spins/goos/generic.py b/spins/goos/generic.py index 9021dad..f534107 100644 --- a/spins/goos/generic.py +++ b/spins/goos/generic.py @@ -129,14 +129,10 @@ def run(self, plan: goos.OptimizationPlan) -> None: plan.logger.info(self._obj) return - if isinstance(self._obj, goos.Function): - nodes = [self._obj] - else: - nodes = self._obj - + nodes = [self._obj] if isinstance(self._obj, goos.Function) else self._obj values = plan.eval_nodes(nodes) for node, val in zip(nodes, values): - plan.logger.info("{}: {}".format(node._goos_name, val)) + plan.logger.info(f"{node._goos_name}: {val}") def log_print(*args, **kwargs) -> LogPrint: diff --git a/spins/goos/graph_executor.py b/spins/goos/graph_executor.py index d688101..8e548cb 100644 --- a/spins/goos/graph_executor.py +++ b/spins/goos/graph_executor.py @@ -95,9 +95,7 @@ def eval_fun( _eval_fun_vals(fun_vals, fun_flags, fun_map, graph, top_sorted_nodes) ret_vals = [fun_vals[out_node] for out_node in out_nodes] - if single_output: - return ret_vals[0] - return ret_vals + return ret_vals[0] if single_output else ret_vals def _eval_fun_vals( @@ -316,11 +314,7 @@ def _update_grad_vals(node, node_grad_vals): node = next(node_iter) while True: - while True: - # Postpone any heavy computation. - if _is_heavy_fun(fun_map[node]): - break - + while True and not _is_heavy_fun(fun_map[node]): # Get the gradients of the output function with respect to # the inputs of function `node`. input_vals = [fun_vals[next_node] for next_node in graph[node]] @@ -400,7 +394,7 @@ def _create_computational_graph( # To identify functions, we use `id`, which is guaranteed to be unique for # two different objects (for CPython, this is simply the memory address). 
out_nodes = [id(fun) for fun in fun_list] - fun_map = {node: fun for node, fun in zip(out_nodes, fun_list)} + fun_map = dict(zip(out_nodes, fun_list)) # Set of functions that do not have inputs. in_nodes = set() @@ -431,7 +425,7 @@ def _create_computational_graph( graph[node].append(next_fun_id) - if next_fun_id in fun_map.keys(): + if next_fun_id in fun_map: continue fun_map[next_fun_id] = next_fun diff --git a/spins/goos/grating.py b/spins/goos/grating.py index 97a954b..2eea9c7 100644 --- a/spins/goos/grating.py +++ b/spins/goos/grating.py @@ -145,11 +145,9 @@ def __init__( initial_value, levels) def eval(self, inputs: List[goos.NumericFlow]) -> goos.ShapeFlow: - if not self._use_edge_locs: - edge_locs = np.cumsum(inputs[0].array) - else: - edge_locs = inputs[0].array - + edge_locs = ( + inputs[0].array if self._use_edge_locs else np.cumsum(inputs[0].array) + ) if self._use_grating_param: # TODO(logansu): Handle inverted flag. # We have to instantiate the parametrization every time because @@ -197,12 +195,7 @@ def grad( grad = np.array( grad.flatten(order="F") @ self._param.calculate_gradient()) / self._pixel_size[self._dir] - if not self._use_grating_param: - # Remove the first and last "artificial" edges (see `eval`). - grad = grad[1:-1] - else: - grad = grad[:-1] - + grad = grad[:-1] if self._use_grating_param else grad[1:-1] if not self._use_edge_locs: grad = np.flip(np.cumsum(np.flip(grad))) @@ -419,7 +412,7 @@ def build_rasterized_1D(self, vector) -> np.ndarray: def get_level_vecs(self): # Separate out into edge parametrizations for each etch level. - level_vecs = [[] for i in range(len(self.etch_depths))] + level_vecs = [[] for _ in range(len(self.etch_depths))] last_level = len(level_vecs) for i in range(len(self.vector)): cur_level = int(self.levels[i]) @@ -442,11 +435,10 @@ def get_structure(self) -> np.ndarray: partial_pixels = 0 for i, raster_vec in enumerate(raster_vecs): pixels_left = layer_pixels[i] - if i != 0: - if np.abs(partial_pixels) > 0.001: - total_z[:, z_ind] += raster_vec * (1 - partial_pixels) - z_ind += 1 - pixels_left -= (1 - partial_pixels) + if i != 0 and np.abs(partial_pixels) > 0.001: + total_z[:, z_ind] += raster_vec * (1 - partial_pixels) + z_ind += 1 + pixels_left -= (1 - partial_pixels) full_pixels = int(pixels_left) partial_pixels = pixels_left - full_pixels if full_pixels > 0: @@ -495,10 +487,7 @@ def build_constraints(self): 'fun': lambda x: self.dims[0] - x, 'jac': lambda x: -ident } - constraints = [ - diff_constraint, lower_bound_constraint, upper_bound_constraint - ] - return constraints + return [diff_constraint, lower_bound_constraint, upper_bound_constraint] def calculate_gradient(self): # Brute force gradient computation. 
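A review note on the grating.py hunk above: Sourcery folds `if not self._use_grating_param: grad = grad[1:-1] else: grad = grad[:-1]` into a single conditional expression with the branches swapped, and it performs the same inversion on other negated conditionals in this patch. Branch swaps under an inverted condition are where silent behavior changes usually creep in, so a throwaway equivalence check along these lines is worth running; this is an illustrative sketch over toy inputs, not spins code:

import numpy as np

def trim_original(grad, use_grating_param):
    # Pre-refactor shape: negated condition first.
    if not use_grating_param:
        return grad[1:-1]  # drop the artificial first and last edges
    return grad[:-1]       # drop only the trailing edge

def trim_refactored(grad, use_grating_param):
    # Post-refactor shape: one conditional expression, branches swapped.
    return grad[:-1] if use_grating_param else grad[1:-1]

grad = np.arange(6.0)
for flag in (True, False):
    assert np.array_equal(trim_original(grad, flag), trim_refactored(grad, flag))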
diff --git a/spins/goos/material.py b/spins/goos/material.py
index 5dbcb13..07a3248 100644
--- a/spins/goos/material.py
+++ b/spins/goos/material.py
@@ -34,6 +34,4 @@ def __eq__(self, other) -> bool:
 
 
 def get_material(mat: Material):
-    if mat is None:
-        return None
-    return ConstantMaterial(mat.to_native()["index"])
+    return None if mat is None else ConstantMaterial(mat.to_native()["index"])
diff --git a/spins/goos/math.py b/spins/goos/math.py
index 45c2c27..5099df2 100644
--- a/spins/goos/math.py
+++ b/spins/goos/math.py
@@ -24,7 +24,7 @@ def __add__(self, obj) -> "Sum":
             return Sum([self, Constant(obj)])
         if isinstance(obj, np.ndarray):
             return Sum([self, Constant(obj)])
-        raise TypeError("Attempting to add node with type {}".format(type(obj)))
+        raise TypeError(f"Attempting to add node with type {type(obj)}")
 
     def __mul__(self, obj) -> "Product":
         if isinstance(obj, Product):
@@ -37,8 +37,7 @@ def __mul__(self, obj) -> "Product":
             return Product([self, Constant(obj)])
         if isinstance(obj, np.ndarray):
             return Product([self, Constant(obj)])
-        raise TypeError("Attempting to multiply node with type {}".format(
-            type(obj)))
+        raise TypeError(f"Attempting to multiply node with type {type(obj)}")
 
     def __radd__(self, obj) -> "Sum":
         return self.__add__(obj)
@@ -64,8 +63,7 @@ def __rtruediv__(self, obj) -> "Product":
     def __pow__(self, obj) -> "Power":
         if isinstance(obj, numbers.Number):
             return Power(self, obj)
-        raise TypeError("Attempting to exponenitate node with type {}".format(
-            type(obj)))
+        raise TypeError(f"Attempting to exponentiate node with type {type(obj)}")
 
 
 class Constant(Function):
@@ -127,7 +125,7 @@ def eval(self, inputs: List) -> flows.NumericFlow:
         return flows.NumericFlow(goos.get_default_plan().get_var_value(self))
 
     def set(self, value):
-        if isinstance(value, numbers.Number) or isinstance(value, np.ndarray):
+        if isinstance(value, (numbers.Number, np.ndarray)):
             value = Constant(value)
         goos.get_default_plan().add_action(SetVariable(self, value))
 
@@ -151,8 +149,8 @@ def __init__(self, var: Variable, value: Function) -> None:
         if (not isinstance(value, numbers.Number) and
                 not isinstance(value, Function)):
             raise TypeError(
-                "`value` must be either numeric or a `goos.Function`,"
-                " got {}".format(value))
+                f"`value` must be either numeric or a `goos.Function`, got {value}"
+            )
 
     def run(self, plan: goos.OptimizationPlan) -> None:
         value = self._value
@@ -198,25 +196,18 @@ def find_dominant_numeric_flow(flow_list: List[goos.NumericFlow],
     for i, flow in enumerate(flow_list):
         if type(flow) == flows.NumericFlow:
             continue
-        if not dom_flow:
-            dom_flow = flow
-            ind = i
-        else:
+        if dom_flow:
             raise ValueError(
                 "Input arguments to sum must have at most one flow where"
                 " type is not `flows.NumericFlow`.")
+        dom_flow = flow
+        ind = i
 
     if not dom_flow:
         # Check whether all flows have same shape.
         flow_shapes = [np.shape(flow.array) for flow in flow_list]
 
-        # Check if all flows have same shape.
-        same_shape = True
-        for shape in flow_shapes:
-            if shape != flow_shapes[0]:
-                same_shape = False
-                break
-
+        same_shape = all(shape == flow_shapes[0] for shape in flow_shapes)
         if same_shape:
             dom_flow = flow_list[0]
             ind = 0
@@ -225,20 +216,16 @@
         # Check whether all flows but one is a scalar.
for i, flow in enumerate(flow_list): if np.size(flow.array) > 1: - if not dom_flow: - dom_flow = flow - ind = i - else: + if dom_flow: raise ValueError("Only one non-scalar is permitted.") + dom_flow = flow + ind = i if not dom_flow: dom_flow = flow_list[0] ind = 0 - if return_ind: - return dom_flow, ind - else: - return dom_flow + return (dom_flow, ind) if return_ind else dom_flow class Sum(Function): @@ -271,13 +258,11 @@ def __add__(self, obj): return Sum(self._fun + obj._fun) if isinstance(obj, Function): return Sum(self._fun + [obj]) - if isinstance(obj, numbers.Number) or isinstance(obj, np.ndarray): - return Sum(self._fun + [Constant(obj)]) - if isinstance(obj, np.ndarray): + if isinstance(obj, (numbers.Number, np.ndarray)): return Sum(self._fun + [Constant(obj)]) raise TypeError( - "Attempting to add a node with type {} to type `Sum`.".format( - type(obj))) + f"Attempting to add a node with type {type(obj)} to type `Sum`." + ) class Product(Function): @@ -318,13 +303,11 @@ def __mul__(self, obj): return Product(self._fun + obj._fun) if isinstance(obj, Function): return Product(self._fun + [obj]) - if isinstance(obj, numbers.Number) or isinstance(obj, np.ndarray): - return Product(self._fun + [Constant(obj)]) - if isinstance(obj, np.ndarray): + if isinstance(obj, (numbers.Number, np.ndarray)): return Product(self._fun + [Constant(obj)]) raise TypeError( - "Attempting to add a node with type {} to type `Sum`.".format( - type(obj))) + f"Attempting to add a node with type {type(obj)} to type `Sum`." + ) class Power(Function): @@ -553,7 +536,7 @@ def _make_slices(self, shape) -> List[slice]: slices += [slice(self._slices[i], self._slices[i] + 1)] elif isinstance(self._slices[i], List): slices += [slice(self._slices[i][0], self._slices[i][1])] - elif self._slices[i] == "c" or self._slices[i] == "center": + elif self._slices[i] in ["c", "center"]: slices += [slice(dim // 2, dim // 2 + 1)] elif self._slices[i] is None: slices += [slice(0, dim)] diff --git a/spins/goos/optplan/context.py b/spins/goos/optplan/context.py index 9846274..1f2b49c 100644 --- a/spins/goos/optplan/context.py +++ b/spins/goos/optplan/context.py @@ -19,14 +19,11 @@ def __init__(self) -> None: def register_node(self, node_name: str, schema, creator: Callable): if node_name in self._node_map: - raise ValueError( - "Node with name {} already registered.".format(node_name)) + raise ValueError(f"Node with name {node_name} already registered.") self._node_map[node_name] = ContextEntry(schema, creator) def get_node(self, node_name: str): - if node_name in self._node_map: - return self._node_map[node_name] - return None + return self._node_map[node_name] if node_name in self._node_map else None def get_node_map(self) -> Dict: return self._node_map @@ -50,13 +47,12 @@ def register_node(self, node_name: str, schema, creator: Callable): def get_node(self, node_name: str): for context in reversed(self._stack): - node = context.get_node(node_name) - if node: + if node := context.get_node(node_name): return node - raise ValueError("Cannot find node {}".format(node_name)) + raise ValueError(f"Cannot find node {node_name}") def get_node_map(self) -> Dict: node_map = {} for context in self._stack: - node_map.update(context.get_node_map()) + node_map |= context.get_node_map() return node_map diff --git a/spins/goos/optplan/optplan.py b/spins/goos/optplan/optplan.py index 649e844..e4e91f9 100644 --- a/spins/goos/optplan/optplan.py +++ b/spins/goos/optplan/optplan.py @@ -22,12 +22,11 @@ def __new__(meta, name, bases, class_dict): # 
Check that the node has a name. if not hasattr(cls, "node_type"): - raise ValueError( - "Problem graph node {} did not define `node_type`.".format(cls)) + raise ValueError(f"Problem graph node {cls} did not define `node_type`.") # `ProblemGraphNode` has special implementation. Do not treat as normal # node. - if cls.node_type == "goos.problem_graph_node" or cls.node_type == "goos.action": + if cls.node_type in ["goos.problem_graph_node", "goos.action"]: return cls # Validate that the constructor arguments are valid. @@ -42,32 +41,21 @@ def __new__(meta, name, bases, class_dict): # Parameters should have type annotation. if param.annotation == param.empty: - raise TypeError("Parameter `{}` has no type annotation.".format( - param.name)) - - # Default values need to be serializable. - if param.default != param.empty and False: - raise ValueError( - "Parameter `{}` has unserializable default value," - " got {}".format(param.name, param.default)) + raise TypeError(f"Parameter `{param.name}` has no type annotation.") # We cannot handle variational parameters. - if (param.kind == param.VAR_POSITIONAL or - param.kind == param.VAR_KEYWORD): - raise TypeError("Problem graph node \"{}\" has a variational" - " parameter.".format(cls.node_type)) + if param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]: + raise TypeError( + f'Problem graph node \"{cls.node_type}\" has a variational parameter.' + ) # Create the schema class. # Check if a class called `Schema` is declared in the class itself # (as opposed to one of its parents). if "Schema" not in cls.__dict__: - # We need to construct a schema inheritance hierarchy that parallels - # the inheritance hierarchy in the actual nodes. - schema_base_classes = [] - for base in bases: - if issubclass(base, ProblemGraphNode): - schema_base_classes.append(base.Schema) - + schema_base_classes = [ + base.Schema for base in bases if issubclass(base, ProblemGraphNode) + ] # If the class explicitly defines `__init__`, then build up a schema # adding arguments from its constructor. Else, build a schema # for the sake of preserving hierarchy but do not add any fields. @@ -87,11 +75,11 @@ def __new__(meta, name, bases, class_dict): cls.Schema, cls) return cls - def __call__(cls, *args, **kwargs): + def __call__(self, *args, **kwargs): # `ProblemGraphNode` has special implementation. Do not treat as normal # node. - if cls.node_type == "goos.problem_graph_node" or cls.node_type == "goos.action": - return super(ProblemGraphNodeMeta, cls).__call__(*args, **kwargs) + if self.node_type in ["goos.problem_graph_node", "goos.action"]: + return super(ProblemGraphNodeMeta, self).__call__(*args, **kwargs) if "name" in kwargs: node_name = kwargs["name"] @@ -100,7 +88,7 @@ def __call__(cls, *args, **kwargs): node_name = None # Verify that all arguments are serializable and save argument values. - signature = inspect.signature(cls.__init__) + signature = inspect.signature(self.__init__) schema_data = {} # Index of the next signature parameter to check. # Start at 1 to ignore `self`. @@ -112,30 +100,19 @@ def __call__(cls, *args, **kwargs): param_ind += 1 # Parse out keyword arguments. for param in parameters[param_ind:]: - if param.name in kwargs: - schema_data[param.name] = kwargs[param.name] - else: - schema_data[param.name] = param.default - - obj = super(ProblemGraphNodeMeta, cls).__call__(*args, **kwargs) + schema_data[param.name] = kwargs.get(param.name, param.default) + obj = super(ProblemGraphNodeMeta, self).__call__(*args, **kwargs) # Name the node and set the schema. 
- if node_name: - obj._goos_name = node_name - else: - obj._goos_name = optplan.schema.generate_name(cls.node_type) - + obj._goos_name = node_name or optplan.schema.generate_name(self.node_type) # Allow the node to custom override the way the schema is set. if not hasattr(obj, "_goos_schema") or not obj._goos_schema: schema_data["name"] = obj._goos_name schema_data = _replace_node_with_schema(schema_data) - obj._goos_schema = cls.Schema(**schema_data) + obj._goos_schema = self.Schema(**schema_data) else: obj._goos_schema.name = obj._goos_name - # TODO(logansu): Validate the schema. - - default_plan = get_default_plan() - if default_plan: + if default_plan := get_default_plan(): default_plan.add_node(obj) return obj @@ -342,15 +319,14 @@ def _create_node(model: models.Model) -> ProblemGraphNode: def get_state_dict(self) -> Dict: """Creates a dictionary with the current state of the plan.""" - # Setup variable data. - var_data = {} - for var_name, var_val in self._var_value.items(): - var_data[var_name] = { + var_data = { + var_name: { "value": var_val, "frozen": self._var_frozen[var_name], "bounds": self._var_bounds[var_name], } - + for var_name, var_val in self._var_value.items() + } data = { "version": "0.2.0", "action_ptr": self._action_ptr, @@ -412,19 +388,17 @@ def write_event(self, if mon_val.ndim == 1: mon_val = mon_val[0] if np.isreal(mon_val): - self._logger.info("Monitor {}: {}".format( - mon_name, mon_val)) + self._logger.info(f"Monitor {mon_name}: {mon_val}") else: self._logger.info( - "Monitor {}: {} (mag={}, phase={})".format( - mon_name, mon_val, np.abs(mon_val), - np.angle(mon_val))) + f"Monitor {mon_name}: {mon_val} (mag={np.abs(mon_val)}, phase={np.angle(mon_val)})" + ) # Save the data. if self._save_path: file_path = os.path.join( - self._save_path, - os.path.join("step{}.pkl".format(self._log_counter))) + self._save_path, f"step{self._log_counter}.pkl" + ) with open(file_path, "wb") as handle: pickle.dump(data, handle) self._logger.info("Data saved to %s", file_path) @@ -517,8 +491,8 @@ def set_var_value(self, check_frozen: bool = True) -> None: if check_frozen and self._var_frozen[node._goos_name]: raise ValueError( - "Attempting to set value of frozen variable {}".format( - node._goos_name)) + f"Attempting to set value of frozen variable {node._goos_name}" + ) self._var_value[node._goos_name] = np.array(value) @_requires_action @@ -558,14 +532,17 @@ def run(self, auto_checkpoint: bool = True) -> None: """ self._modifiable = True while self._action_ptr < len(self._actions): - self.logger.info("Running action {} ({}).".format( - self._action_ptr, self._actions[self._action_ptr]._goos_name)) + self.logger.info( + f"Running action {self._action_ptr} ({self._actions[self._action_ptr]._goos_name})." + ) self._actions[self._action_ptr].run(self) if auto_checkpoint and self._save_path: self.write_checkpoint( - os.path.join(self._save_path, - "action{}.chkpt".format(self._action_ptr))) + os.path.join( + self._save_path, f"action{self._action_ptr}.chkpt" + ) + ) self._action_ptr += 1 self._action_data = None @@ -600,25 +577,27 @@ def resume(self, # Attempt to resume if there is event data to resume from. if self._action_data: - self.logger.info("Resuming action {} ({}).".format( - self._action_ptr, self._actions[self._action_ptr]._goos_name)) + self.logger.info( + f"Resuming action {self._action_ptr} ({self._actions[self._action_ptr]._goos_name})." 
+ ) if hasattr(self._actions[self._action_ptr], "resume"): self._actions[self._action_ptr].resume(self, self._action_data) else: self.logger.warning( - "Action has no resume capabilities {}, re-running.".format( - self._action_ptr)) + f"Action has no resume capabilities {self._action_ptr}, re-running." + ) self._actions[self._action_ptr].run(self) else: - self.logger.info("Running action {} ({}).".format( - self._action_ptr, self._actions[self._action_ptr]._goos_name)) + self.logger.info( + f"Running action {self._action_ptr} ({self._actions[self._action_ptr]._goos_name})." + ) self._actions[self._action_ptr].run(self) if auto_checkpoint and self._save_path: self.write_checkpoint( - os.path.join(self._save_path, - "action{}.chkpt".format(self._action_ptr))) + os.path.join(self._save_path, f"action{self._action_ptr}.chkpt") + ) self._action_ptr += 1 self._action_data = None @@ -713,6 +692,4 @@ def pop_plan() -> OptimizationPlan: def get_default_plan() -> OptimizationPlan: - if optplan.GLOBAL_PLAN_STACK: - return optplan.GLOBAL_PLAN_STACK[-1] - return None + return optplan.GLOBAL_PLAN_STACK[-1] if optplan.GLOBAL_PLAN_STACK else None diff --git a/spins/goos/optplan/schema.py b/spins/goos/optplan/schema.py index 6f7d19f..d24ae6b 100644 --- a/spins/goos/optplan/schema.py +++ b/spins/goos/optplan/schema.py @@ -33,11 +33,7 @@ def is_union_type(cls): return True # Python 3.6 - if not hasattr(cls, "__origin__"): - return False - if cls.__origin__ is Union: - return True - return False + return cls.__origin__ is Union if hasattr(cls, "__origin__") else False typing_inspect.is_union_type = is_union_type @@ -69,7 +65,7 @@ def get_args(cls) -> Tuple: elif issubclass(cls, List): return cls.__args__ else: - raise ValueError("Cannot get type arguments for {}".format(cls)) + raise ValueError(f"Cannot get type arguments for {cls}") typing.get_args = get_args @@ -137,12 +133,11 @@ class FunSchema(models.Model): if skip_first_arg: param_list = param_list[1:] - model_args = {} - for param in param_list: - model_args[param.name] = _convert_param_to_schematics(param) - + model_args = { + param.name: _convert_param_to_schematics(param) for param in param_list + } if other_fields: - model_args.update(other_fields) + model_args |= other_fields return type(schema_name, tuple(base_classes), model_args) @@ -196,8 +191,7 @@ def _convert_type_to_schematics(typ) -> Tuple: arg_typ, arg_args = _convert_type_to_schematics(arg) schema_args.append(arg_typ(*arg_args)) else: - raise ValueError( - "Cannot convert type to schema type, got {}".format(typ)) + raise ValueError(f"Cannot convert type to schema type, got {typ}") elif issubclass(typ, optplan.ProblemGraphNode): schema_type = optplan.ReferenceType schema_args = [typ.Schema] @@ -209,8 +203,7 @@ def _convert_type_to_schematics(typ) -> Tuple: schema_type = types.PolyModelType schema_args = [typ] else: - raise ValueError( - "Cannot convert type to schema type, got {}".format(typ)) + raise ValueError(f"Cannot convert type to schema type, got {typ}") return schema_type, schema_args @@ -264,8 +257,7 @@ def generate_name(model_type: str) -> str: if model_type not in optplan.PROBLEM_GRAPH_NAME_MAP: optplan.PROBLEM_GRAPH_NAME_MAP[model_type] = 0 - name = "{}.{}".format(model_type, - optplan.PROBLEM_GRAPH_NAME_MAP[model_type]) + name = f"{model_type}.{optplan.PROBLEM_GRAPH_NAME_MAP[model_type]}" optplan.PROBLEM_GRAPH_NAME_MAP[model_type] += 1 @@ -313,18 +305,16 @@ def _iter_optplan_fields( # Wrap `process_field` so that returning `None` is same as returning the # child. 
def process_field_wrapped( - parent: models.Model, - child: Union[str, optplan.ProblemGraphNodeSchema], - field_type: optplan.ReferenceType, - ) -> optplan.ProblemGraphNodeSchema: + parent: models.Model, + child: Union[str, optplan.ProblemGraphNodeSchema], + field_type: optplan.ReferenceType, + ) -> optplan.ProblemGraphNodeSchema: if pass_field_info: return_val = process_field(parent, child, field_type) else: return_val = process_field(parent, child) - if return_val is None: - return child - return return_val + return child if return_val is None else return_val with warnings.catch_warnings(): warnings.simplefilter("ignore") @@ -396,8 +386,9 @@ def validate(plan: optplan.OptimizationPlanSchema) -> None: node_name = node.name node.validate() except Exception as exc: - raise ValueError("Error encountered when validating node {}".format( - node_name)) from exc + raise ValueError( + f"Error encountered when validating node {node_name}" + ) from exc # Now validate the plan schema itself just in case we missed something # from the previous checks. @@ -407,7 +398,7 @@ def validate(plan: optplan.OptimizationPlanSchema) -> None: names = set() for node in plan.nodes: if node.name in names: - raise ValueError("Nonunique name found: {}".format(node.name)) + raise ValueError(f"Nonunique name found: {node.name}") names.add(node.name) @@ -449,8 +440,7 @@ def _validate_optplan_version(version: str) -> None: """ version_parts = [int(part) for part in version.split(".")] if version_parts[0] < 0 or version_parts[1] < 3: - raise ValueError( - "Optplan must be at least version 0.3.0, got {}".format(version)) + raise ValueError(f"Optplan must be at least version 0.3.0, got {version}") def loads(serialized_plan: str) -> optplan.OptimizationPlanSchema: diff --git a/spins/goos/optplan/schema_optplan.py b/spins/goos/optplan/schema_optplan.py index 6ea6065..f2ee99b 100644 --- a/spins/goos/optplan/schema_optplan.py +++ b/spins/goos/optplan/schema_optplan.py @@ -62,8 +62,8 @@ def __init__(self, *args, **kwargs) -> None: # Validate the name. if self.name.startswith("__"): raise ValueError( - "Name cannot start with two underscores (__), got {}".format( - self.name)) + f"Name cannot start with two underscores (__), got {self.name}" + ) # Verify that reference fields have been appropriately set. 
This is # actually a redundant check since `optplan.loads` and `optplan.dumps` @@ -96,8 +96,9 @@ def __init__(self, *args, **kwargs) -> None: elif isinstance(field_value, optplan.WildcardSchema): continue - raise ValueError("Expected type {} for field {}, got {}".format( - field_type.reference_type, field_name, type(field_value))) + raise ValueError( + f"Expected type {field_type.reference_type} for field {field_name}, got {type(field_value)}" + ) class ProblemGraphNodeSchema(NodeSchema): @@ -146,8 +147,8 @@ def _convert(self, value, context): if not issubclass(model_class, self._node_meta_type): raise ValueError( - "Unknown node, got node type '{}' with metatype '{}'".format( - value["type"], self._node_meta_type)) + f"""Unknown node, got node type '{value["type"]}' with metatype '{self._node_meta_type}'""" + ) return model_class(value, context=context) diff --git a/spins/goos/optplan/schema_types.py b/spins/goos/optplan/schema_types.py index 839fccb..cf9bff5 100644 --- a/spins/goos/optplan/schema_types.py +++ b/spins/goos/optplan/schema_types.py @@ -21,11 +21,10 @@ def to_native(self, value, context=None): elif isinstance(value, list): if len(value) != 2: raise ValueError( - "Complex number primitive form must be a list of two" - " elements, got {}".format(value)) + f"Complex number primitive form must be a list of two elements, got {value}" + ) return value[0] + 1j * value[1] - raise ValueError( - "Could not convert to complex number, got {}".format(value)) + raise ValueError(f"Could not convert to complex number, got {value}") def to_primitive(self, value, context=None): if not isinstance(value, complex): @@ -41,9 +40,7 @@ class NumpyArrayType(types.BaseType): """ def to_native(self, value, context=None): - if isinstance(value, np.ndarray): - return value - elif isinstance(value, numbers.Number): + if isinstance(value, (np.ndarray, numbers.Number)): return value elif isinstance(value, dict): return np.array(value["real"]) + 1j * np.array(value["imag"]) diff --git a/spins/goos/optplan/schema_utils.py b/spins/goos/optplan/schema_utils.py index a1023bf..e638784 100644 --- a/spins/goos/optplan/schema_utils.py +++ b/spins/goos/optplan/schema_utils.py @@ -176,11 +176,11 @@ def __init__(self, *args, **kwargs): with warnings.catch_warnings(): warnings.simplefilter("ignore") - init_kwargs = {} - for key, value in kwargs.items(): - if key not in self._schema.fields.keys(): - init_kwargs[key] = value - + init_kwargs = { + key: value + for key, value in kwargs.items() + if key not in self._schema.fields.keys() + } super().__init__(*args, **init_kwargs) for key, item in kwargs.items(): if key in self._schema.fields.keys(): diff --git a/spins/goos/schema_registry.py b/spins/goos/schema_registry.py index c1e575e..930bd34 100644 --- a/spins/goos/schema_registry.py +++ b/spins/goos/schema_registry.py @@ -25,14 +25,11 @@ def register(self, creator: Callable, metadata: Dict = None): if name in self._node_map: - raise ValueError( - "Node with name {} already registered.".format(name)) + raise ValueError(f"Node with name {name} already registered.") self._node_map[name] = SchemaEntry(schema, creator, metadata) def get(self, node_name: str): - if node_name in self._node_map: - return self._node_map[node_name] - return None + return self._node_map[node_name] if node_name in self._node_map else None def get_map(self) -> collections.OrderedDict: return self._node_map @@ -55,10 +52,9 @@ def register(self, *args, **kwargs): def get(self, node_name: str): for context in reversed(self._stack): - node = 
context.get(node_name) - if node: + if node := context.get(node_name): return node - raise ValueError("Cannot find node {}".format(node_name)) + raise ValueError(f"Cannot find node {node_name}") def get_map(self) -> collections.OrderedDict: node_map = collections.OrderedDict() diff --git a/spins/goos/shapes.py b/spins/goos/shapes.py index c1bac0c..f8dd288 100644 --- a/spins/goos/shapes.py +++ b/spins/goos/shapes.py @@ -219,16 +219,6 @@ def get_relative_cell_coords(cls, extents: np.ndarray, """ edge_coords = cls.get_relative_edge_coords(extents, pixel_size) return [(coord[:-1] + coord[1:]) / 2 for coord in edge_coords] - coords = [] - for i in range(len(extents)): - coord = pixel_size[i] * np.arange( - 0, - int(extents[i] / pixel_size[i]) + 1) - if coord[-1] >= extents[i]: - coords.append(coord[:-1]) - else: - coords.append(coord) - return [coord - np.mean(coord) for coord in coords] def get_cell_coords(self) -> List[np.ndarray]: """Returns the absolute coordinates of the cells.""" diff --git a/spins/goos/test_graph_executor.py b/spins/goos/test_graph_executor.py index 6077673..8773608 100644 --- a/spins/goos/test_graph_executor.py +++ b/spins/goos/test_graph_executor.py @@ -12,14 +12,14 @@ def test_top_sort_affinity(): "g": ["h"], "h": [] } - affinity_nodes = set(["b", "g", "f"]) + affinity_nodes = {"b", "g", "f"} sorted_nodes = graph_executor._top_sort_affinity(graph, affinity_nodes) # The topological ordering must be a[cde][bf]gh where square brackets denote # that any combination is acceptable. assert sorted_nodes[0] == "a" - assert set(sorted_nodes[1:4]) == set(["c", "d", "e"]) - assert set(sorted_nodes[4:6]) == set(["b", "f"]) + assert set(sorted_nodes[1:4]) == {"c", "d", "e"} + assert set(sorted_nodes[4:6]) == {"b", "f"} assert sorted_nodes[6] == "g" assert sorted_nodes[7] == "h" diff --git a/spins/goos/util.py b/spins/goos/util.py index d252826..78e52b9 100644 --- a/spins/goos/util.py +++ b/spins/goos/util.py @@ -74,12 +74,12 @@ def visualize_fields( plt.subplot(2, 3, i + 1) plt.imshow(np.real(fields[i][slicer].squeeze())) plt.colorbar() - plt.title("Re[E{}]".format(comp_name)) + plt.title(f"Re[E{comp_name}]") plt.subplot(2, 3, i + 4) plt.imshow(np.imag(fields[i][slicer].squeeze())) plt.colorbar() - plt.title("Im[E{}]".format(comp_name)) + plt.title(f"Im[E{comp_name}]") plt.show() diff --git a/spins/goos_sim/maxwell/render.py b/spins/goos_sim/maxwell/render.py index ecd2c4e..319985c 100644 --- a/spins/goos_sim/maxwell/render.py +++ b/spins/goos_sim/maxwell/render.py @@ -168,8 +168,7 @@ def __init__(self, flow: goos.ArrayFlow): def eval(self, grid: gridlock.Grid, params: RenderParams): extra_grids = [] for s in self._shapes: - extra_grid = s.eval(grid, params) - if extra_grid: + if extra_grid := s.eval(grid, params): extra_grids.append(extra_grid) return extra_grids @@ -183,8 +182,7 @@ def _create_geometry(shape: Union[goos.ArrayFlow, goos.Shape]) -> GeometryImpl: if isinstance(shape, flow_entry.schema): return flow_entry.creator(shape) - raise ValueError("Encountered unrenderable type, got {}.".format( - type(shape))) + raise ValueError(f"Encountered unrenderable type, got {type(shape)}.") def get_rendering_matrix(shape_edge_coords, grid_edge_coords): @@ -225,5 +223,3 @@ def get_rendering_matrix_1d(shape_coord, grid_coord): shape=(len(grid_coord) - 1, len(shape_coord) - 1)) -if __name__ == "__main__": - pass diff --git a/spins/goos_sim/maxwell/simspace.py b/spins/goos_sim/maxwell/simspace.py index 08a8f6b..843efce 100644 --- a/spins/goos_sim/maxwell/simspace.py +++ 
b/spins/goos_sim/maxwell/simspace.py @@ -86,22 +86,18 @@ def create_edge_coords( # Give warning and modify simulation extent, if it will produce an odd dxes # length in a symmetric axis for i, ext in enumerate(extents): - if reflection_symmetry[i]: - if not (ext / (2 * dx)).is_integer(): - extents[i] = np.floor(ext / (2 * dx)) * 2 * dx - logger.warning( - "Symmetry requires simulation extents to be an integer " - "multiple of `2 * dx`, the simulation extents for {} " - "direction has been changed to {}".format( - "xyz"[i], extents[i])) + if reflection_symmetry[i] and not (ext / (2 * dx)).is_integer(): + extents[i] = np.floor(ext / (2 * dx)) * 2 * dx + logger.warning( + f'Symmetry requires simulation extents to be an integer multiple of `2 * dx`; the simulation extents for the {"xyz"[i]} direction have been changed to {extents[i]}' + ) xyz_min = np.array(sim_region.center) - np.array(extents) / 2 xyz_max = np.array(sim_region.center) + np.array(extents) / 2 - edge_coords = [] - for i in range(3): - edge_coords.append(np.arange(xyz_min[i], xyz_max[i] + dx / 2, dx)) - + edge_coords = [ + np.arange(xyz_min[i], xyz_max[i] + dx / 2, dx) for i in range(3) + ] # Recenter around the center. edge_coords = [ e - (e[0] + e[-1]) / 2 + c diff --git a/spins/goos_sim/maxwell/simulate.py b/spins/goos_sim/maxwell/simulate.py index c818a92..f31de78 100644 --- a/spins/goos_sim/maxwell/simulate.py +++ b/spins/goos_sim/maxwell/simulate.py @@ -224,15 +224,14 @@ def before_sim(self, sim: FdfdSimProp) -> None: pass def eval(self, sim: FdfdSimProp) -> goos.Flow: - raise NotImplemented("{} cannot be evaluated.".format(type(self))) + raise NotImplementedError(f"{type(self)} cannot be evaluated.") def before_adjoint_sim(self, adjoint_sim: FdfdSimProp, grad_val: goos.Flow.Grad) -> None: # If an implementation is not provided, then the gradient must be # `None` to indicate that the gradient is not needed. 
if grad_val: - raise NotImplemented( - "Cannot differentiate with respect to {}".format(type(self))) + raise NotImplementedError(f"Cannot differentiate with respect to {type(self)}") @goos.polymorphic_model() @@ -589,20 +588,19 @@ def _create_solver(solver_name: str, solver_info: Optional[Solver], if solver_info: solver = maxwell.SIM_REGISTRY.get(solver_info.type).creator( solver_info, dims) + elif solver_name == "local_direct": + solver = DIRECT_SOLVER + elif solver_name == "maxwell_bicgstab": + from spins.fdfd_solvers.maxwell import MaxwellSolver + solver = MaxwellSolver(dims, solver="biCGSTAB") + elif solver_name == "maxwell_cg": + from spins.fdfd_solvers.maxwell import MaxwellSolver + solver = MaxwellSolver(dims, solver="CG") + elif solver_name == "maxwell_eig": + from spins.fdfd_solvers.maxwell import MaxwellSolver + solver = MaxwellSolver(dims, solver="Jacobi-Davidson") else: - if solver_name == "maxwell_cg": - from spins.fdfd_solvers.maxwell import MaxwellSolver - solver = MaxwellSolver(dims, solver="CG") - elif solver_name == "maxwell_bicgstab": - from spins.fdfd_solvers.maxwell import MaxwellSolver - solver = MaxwellSolver(dims, solver="biCGSTAB") - elif solver_name == "maxwell_eig": - from spins.fdfd_solvers.maxwell import MaxwellSolver - solver = MaxwellSolver(dims, solver="Jacobi-Davidson") - elif solver_name == "local_direct": - solver = DIRECT_SOLVER - else: - raise ValueError("Unknown solver, got {}".format(solver_name)) + raise ValueError(f"Unknown solver, got {solver_name}") return solver @@ -612,7 +610,7 @@ def _create_outputs(outputs: List[SimOutput]) -> List[SimOutputImpl]: for out in outputs: cls = maxwell.SIM_REGISTRY.get(out.type) if cls is None: - raise ValueError("Unsupported output type, got {}".format(out.type)) + raise ValueError(f"Unsupported output type, got {out.type}") output_impls.append(cls.creator(out)) return output_impls @@ -622,7 +620,7 @@ def _create_sources(sources: List[SimSource]) -> List[SimSourceImpl]: for src in sources: cls = maxwell.SIM_REGISTRY.get(src.type) if cls is None: - raise ValueError("Unsupported output type, got {}".format(src.type)) + raise ValueError(f"Unsupported source type, got {src.type}") source_impls.append(cls.creator(src)) return source_impls @@ -644,10 +642,7 @@ def fdfd_simulation(wavelength: float, wavelength=wavelength, simulation_space=simulation_space, **kwargs) - if return_eps: - return sim, eps - else: - return sim + return (sim, eps) if return_eps else sim @dataclasses.dataclass @@ -704,18 +699,15 @@ def __init__( self._solver = _create_solver(solver, None, self._grid.shape) else: raise ValueError( - "Invalid solver, solver for eigensolves need to be in " + - str(eigen_solvers) + ".") + f"Invalid solver, solver for eigensolves needs to be in {eigen_solvers}." 
+ ) self._simspace = simulation_space - if bloch_vector: - self._bloch_vector = bloch_vector - else: - self._bloch_vector = np.array([0, 0, 0]) - self._wlen = wavelength #this will be used for the initial guess + self._bloch_vector = bloch_vector or np.array([0, 0, 0]) self._pml_layers = [ int(length / self._simspace.mesh.dx) for length in self._simspace.pml_thickness ] + self._wlen = wavelength self._symmetry = simulation_space.reflection_symmetry self._sources = _create_sources(sources) @@ -847,10 +839,7 @@ def eig_simulation(wavelength: float, simulation_space=simulation_space, bloch_vector=bloch_vector, **kwargs) - if get_eps: - return sim, eps - else: - return sim + return (sim, eps) if get_eps else sim @goos.polymorphic_model() diff --git a/spins/gridlock/direction.py b/spins/gridlock/direction.py index 18daefd..555b1a4 100644 --- a/spins/gridlock/direction.py +++ b/spins/gridlock/direction.py @@ -24,11 +24,7 @@ def axisvec2polarity(vector: np.ndarray) -> int: The polarity of the vector, which is either 1 (for positive direction) and -1 (for negative direction). """ - if isinstance(vector, List): - vec = np.array(vector) - else: - vec = vector - + vec = np.array(vector) if isinstance(vector, List) else vector axis = axisvec2axis(vec) return np.sign(vec[axis]) @@ -46,19 +42,12 @@ def axisvec2axis(vector: np.ndarray) -> int: Raises: ValueError: If the vector is not axis-aligned. """ - if isinstance(vector, List): - vec = np.array(vector) - else: - vec = vector - + vec = np.array(vector) if isinstance(vector, List) else vector norm = np.linalg.norm(vec) delta = 1e-6 * norm # Check that only one element of vector is larger than delta. if sum(abs(vec) > delta) != 1: - raise ValueError( - "Vector has no valid primary coordinate axis, got: {}".format(vec)) - - axis = np.argwhere(abs(vec) > delta).flatten()[0] + raise ValueError(f"Vector has no valid primary coordinate axis, got: {vec}") - return axis + return np.argwhere(abs(vec) > delta).flatten()[0] diff --git a/spins/gridlock/float_raster.py b/spins/gridlock/float_raster.py index f7fffe4..1b742ca 100644 --- a/spins/gridlock/float_raster.py +++ b/spins/gridlock/float_raster.py @@ -36,11 +36,13 @@ def raster_1D(poly_x: np.ndarray, if `grid_x` has less than 2 elements. """ if poly_x.size != 2: - raise ValueError("Expected `poly_x` to have exactly 2 elements, got " - "{} instead.".format(poly_x.size)) + raise ValueError( + f"Expected `poly_x` to have exactly 2 elements, got {poly_x.size} instead." + ) if grid_x.size < 2: - raise ValueError("Expected `grid_x` to have at least 2 elements, got " - "{} instead.".format(grid_x.size)) + raise ValueError( + f"Expected `grid_x` to have at least 2 elements, got {grid_x.size} instead." + ) # Get the dimensions of the grid. dim = grid_x.size @@ -431,12 +433,13 @@ def raster_2D(poly_xy: np.ndarray, or `grid_y` have a size less than 2. """ if poly_xy.shape[0] != 2: - raise ValueError("Expected `poly_xy` to have 2 rows, got {} instead.". format(poly_xy.shape[0])) + raise ValueError( + f"Expected `poly_xy` to have 2 rows, got {poly_xy.shape[0]} instead." + ) if grid_x.size < 2 or grid_y.size < 2: - raise ValueError("Expected both `grid_x` and `grid_y` to have atleast 2" - " elements, got sizes of {} and {} respectively." .format(grid_x.size, grid_y.size)) + raise ValueError( + f"Expected both `grid_x` and `grid_y` to have at least 2 elements, got sizes of {grid_x.size} and {grid_y.size} respectively." + ) # Oversample the polygon by including its intersection with the grid as # new vertices. 
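A note on language compatibility before the next file: several hunks in this patch lean on newer Python syntax, notably assignment expressions (`if node := context.get_node(node_name):`) and the dict merge operators (`node_map |= context.get_node_map()`, `model_args |= other_fields`). Below is a minimal, self-contained sketch of the two features with the strictest version requirements; the registry and option names are illustrative stand-ins, not taken from spins.

    # Assignment expressions (PEP 572, Python 3.8+) bind a value and test it
    # in one step, replacing the two-line get-then-check pattern.
    registry = {"sim": "fdfd"}  # hypothetical stand-in for a node registry
    if node := registry.get("sim"):
        print(f"found {node}")

    # Dict merge operators (PEP 584, Python 3.9+): `a | b` returns a merged
    # copy and `a |= b` updates in place; on duplicate keys the right-hand
    # operand wins, matching dict.update().
    defaults = {"dx": 40, "solver": "local_direct"}
    overrides = {"solver": "maxwell_cg"}
    merged = defaults | overrides  # {'dx': 40, 'solver': 'maxwell_cg'}
    defaults |= overrides          # same merge, mutating `defaults`

If the project still needs to run on Python 3.7, the `:=` and `|=` rewrites are the ones that would have to stay in their original form.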
diff --git a/spins/gridlock/grid.py b/spins/gridlock/grid.py index eb4d3c2..ca4b97a 100644 --- a/spins/gridlock/grid.py +++ b/spins/gridlock/grid.py @@ -251,7 +251,7 @@ def ind2pos(self, low_bound = -0.5 high_bound = -0.5 if (ind < low_bound).any() or (ind > self.shape - high_bound).any(): - raise GridError('Position outside of grid: {}'.format(ind)) + raise GridError(f'Position outside of grid: {ind}') if round_ind: rind = np.clip(np.round(ind), 0, self.shape - 1) @@ -285,7 +285,7 @@ def pos2ind(self, """ r = np.squeeze(r) if r.size != 3: - raise GridError('r must be 3-element vector: {}'.format(r)) + raise GridError(f'r must be 3-element vector: {r}') if (which_shifts is not None) and (which_shifts >= self.shifts.shape[0]): @@ -297,7 +297,7 @@ def pos2ind(self, for a in range(3): if self.shape[a] > 1 and (r[a] < sexyz[a][0] or r[a] > sexyz[a][-1]): - raise GridError('Position[{}] outside of grid!'.format(a)) + raise GridError(f'Position[{a}] outside of grid!') grid_pos = zeros((3,)) for a in range(3): @@ -352,29 +352,24 @@ def __init__(self, self.exyz = [np.unique(pixel_edge_coordinates[i]) for i in range(3)] for i in range(3): if len(self.exyz[i]) != len(pixel_edge_coordinates[i]): - warnings.warn( - 'Dimension {} had duplicate edge coordinates'.format(i)) - - if is_scalar(periodic): - self.periodic = [periodic] * 3 - else: - self.periodic = [False] * 3 + warnings.warn(f'Dimension {i} had duplicate edge coordinates') + self.periodic = [periodic] * 3 if is_scalar(periodic) else [False] * 3 self.shifts = np.array(shifts, dtype=float) self.comp_shifts = np.array(comp_shifts, dtype=float) if self.shifts.shape[1] != 3: GridError( - 'Misshapen shifts on the primary grid; second axis size should be 3,' - ' shape is {}'.format(self.shifts.shape)) + f'Misshapen shifts on the primary grid; second axis size should be 3, shape is {self.shifts.shape}' + ) if self.comp_shifts.shape[1] != 3: GridError( - 'Misshapen shifts on the complementary grid: second axis size should be 3,' - ' shape is {}'.format(self.comp_shifts.shape)) + f'Misshapen shifts on the complementary grid: second axis size should be 3, shape is {self.comp_shifts.shape}' + ) if self.comp_shifts.shape[0] != self.shifts.shape[0]: GridError( 'Inconsistent number of shifts in the primary and complementary grid' ) - if not ((self.shifts >= 0).all() and (self.comp_shifts >= 0).all()): + if not (self.shifts >= 0).all() or not (self.comp_shifts >= 0).all(): GridError( 'Shifts are required to be non-negative for both primary and complementary grid' ) @@ -399,12 +394,12 @@ def __init__(self, if initial[i] is not None: self.grids_bg[i] = np.full( self.shape, initial[i], dtype=complex) - else: - if not np.array_equal(initial[i].shape, self.shape): - raise GridError( - 'Initial grid sizes must match given coordinates') + elif np.array_equal(initial[i].shape, self.shape): self.grids_bg[i] = initial[i] + else: + raise GridError( + 'Initial grid sizes must match given coordinates') if isinstance(ext_dir, Direction): self.ext_dir = ext_dir.value elif is_scalar(ext_dir): @@ -590,7 +585,7 @@ def draw_slab(self, dir_slab: Direction or float, center: float, dir_slab = dir_slab.value elif not is_scalar(dir_slab): raise GridError('Invalid slab direction') - elif not dir_slab in range(3): + elif dir_slab not in range(3): raise GridError('Invalid slab direction') if not is_scalar(center): @@ -608,7 +603,7 @@ def draw_slab(self, dir_slab: Direction or float, center: float, cuboid_cen = np.array( [self.center[a] if a != dir_slab else center for a in range(3)]) 
cuboid_extent = np.array([2*np.abs(self.exyz[a][-1]-self.exyz[a][0]) if a !=dir_slab \ - else thickness for a in range(3)]) + else thickness for a in range(3)]) self.draw_cuboid(cuboid_cen, cuboid_extent, eps) def fill_cuboid(self, fill_dir: Direction, fill_pol: int, @@ -627,12 +622,12 @@ def fill_cuboid(self, fill_dir: Direction, fill_pol: int, fill_dir = fill_dir.value elif not is_scalar(fill_dir): raise GridError('Invalid slab direction') - elif not dir_slab in range(3): + elif dir_slab not in range(3): raise GridError('Invalid slab direction') if not is_scalar(fill_pol): raise GridError('Invalid polarity') - if not fill_pol in [-1, 1]: + if fill_pol not in {-1, 1}: raise GridError('Invalid polarity') if surf_center.ndim != 1 or surf_center.size != 3: @@ -648,7 +643,7 @@ def fill_cuboid(self, fill_dir: Direction, fill_pol: int, cuboid_center = np.array([surf_center[a] if a != fill_dir else \ - (surf_center[a]+0.5*fill_pol*cuboid_extent[a]) for a in range(3)]) + (surf_center[a]+0.5*fill_pol*cuboid_extent[a]) for a in range(3)]) self.draw_cuboid(cuboid_center, cuboid_extent, eps) @@ -660,12 +655,12 @@ def fill_slab(self, fill_dir: Direction, fill_pol: int, surf_center: float, fill_dir = fill_dir.value elif not is_scalar(fill_dir): raise GridError('Invalid slab direction') - elif not dir_slab in range(3): + elif dir_slab not in range(3): raise GridError('Invalid slab direction') if not is_scalar(fill_pol): raise GridError('Invalid polarity') - if not fill_pol in [-1, 1]: + if fill_pol not in {-1, 1}: raise GridError('Invalid polarity') if not is_scalar(surf_center): @@ -687,7 +682,7 @@ def compute_layers(self): # Calculating the layer coordinates self.layer_z = np.sort(np.unique(np.array(self.list_z).flatten('F'))) - self.layer_polygons = [[] for i in range(self.layer_z.size - 1)] + self.layer_polygons = [[] for _ in range(self.layer_z.size - 1)] # Assigning polynomials into layers for i in range(len(self.list_polygons)): @@ -737,11 +732,7 @@ def compute_intersection(polygon_1, polygon_2): gds_poly2 = gdspy.Polygon(polygon_2, 0) gds_poly = gdspy.fast_boolean( gds_poly1, gds_poly2, 'not', layer=1) - if gds_poly is None: - return [] - else: - return gds_poly.polygons - + return [] if gds_poly is None else gds_poly.polygons else: return [polygon_1] @@ -954,7 +945,7 @@ def render(self, disable_intersection: bool = False): # Now all the layers and polygons should not intersect with each other and can be aliased on the grids for i, polygons in enumerate(self.reduced_layer_polygons): - for j, polygon in enumerate(self.reduced_layer_polygons[i]): + for polygon in self.reduced_layer_polygons[i]: # Iterating over each layer and rendering each polygon if polygon is not None: self.render_polygon( @@ -964,7 +955,7 @@ def render(self, disable_intersection: bool = False): eps=polygon[1]) # Finally, adding the background permittivity - for i in range(0, self.shifts.shape[0]): + for i in range(self.shifts.shape[0]): self.grids[i] = self.grids[i] + self.grids_bg[i] * self.frac_bg[i] def get_slice(self, diff --git a/spins/gridlock/selection_matrix.py b/spins/gridlock/selection_matrix.py index 55fd8a2..c1b5a4e 100644 --- a/spins/gridlock/selection_matrix.py +++ b/spins/gridlock/selection_matrix.py @@ -82,10 +82,11 @@ def average_2xFarEdge(grid_size, design_bounds): Sz_flat = scipy.sparse.vstack((zero_padding_t, Sy_flat_preshift, zero_padding_b)) - # Repmat to make a full 3D grid. 
- S = scipy.sparse.vstack([Sx_flat] * grid_size[2] + - [Sy_flat] * grid_size[2] + [Sz_flat] * grid_size[2]) - return S + return scipy.sparse.vstack( + [Sx_flat] * grid_size[2] + + [Sy_flat] * grid_size[2] + + [Sz_flat] * grid_size[2] + ) def direct_lattice_deprecated(grid_size, @@ -120,30 +121,34 @@ def direct_lattice_deprecated(grid_size, x_ind = x_ind.flatten(order='F') y_ind = y_ind.flatten(order='F') - num_gridxy = np.prod(grid_size[0:2]) + num_gridxy = np.prod(grid_size[:2]) num_design = np.prod(2 * design_area) mode = ['wrap', 'clip'] # X-grid - map_x = np.ravel_multi_index([ - design_bounds[0][0] + (x_ind + 1) // 2 - 1, - design_bounds[0][1] + y_ind // 2 - ], - grid_size[0:2], - order='F', - mode=mode[wrap_or_clip[0]]) + map_x = np.ravel_multi_index( + [ + design_bounds[0][0] + (x_ind + 1) // 2 - 1, + design_bounds[0][1] + y_ind // 2, + ], + grid_size[:2], + order='F', + mode=mode[wrap_or_clip[0]], + ) Sx_flat = scipy.sparse.csr_matrix( (np.ones(len(map_x)), (map_x, np.array(np.arange(len(map_x))))), shape=(num_gridxy, num_design)) # Y-grid - map_y = np.ravel_multi_index([ - design_bounds[0][0] + x_ind // 2, - design_bounds[0][1] + (y_ind + 1) // 2 - 1 - ], - grid_size[0:2], - order='F', - mode=mode[wrap_or_clip[1]]) + map_y = np.ravel_multi_index( + [ + design_bounds[0][0] + x_ind // 2, + design_bounds[0][1] + (y_ind + 1) // 2 - 1, + ], + grid_size[:2], + order='F', + mode=mode[wrap_or_clip[1]], + ) Sy_flat = scipy.sparse.csr_matrix( (np.ones(len(map_y)), (map_y, np.array(np.arange(len(map_y))))), shape=(num_gridxy, num_design)) @@ -151,9 +156,10 @@ def direct_lattice_deprecated(grid_size, # Z-grid map_z = np.ravel_multi_index( [design_bounds[0][0] + x_ind // 2, design_bounds[0][1] + y_ind // 2], - grid_size[0:2], + grid_size[:2], order='F', - mode=mode[wrap_or_clip[2]]) + mode=mode[wrap_or_clip[2]], + ) Sz_flat = scipy.sparse.csr_matrix( (np.ones(len(map_z)), (map_z, np.array(np.arange(len(map_z))))), shape=(num_gridxy, num_design)) @@ -176,10 +182,7 @@ def normalize_rows(S): Sz = scipy.sparse.vstack([Sz_flat] * grid_size[2]) S = scipy.sparse.vstack((Sx, Sy, Sz)) - if get_design_area: - return S, 2 * design_area - else: - return S + return (S, 2 * design_area) if get_design_area else S def calculate_design_bounds_yee(eps_bg: fdfd_tools.VecField, @@ -316,7 +319,7 @@ def direct_lattice_to_yee_grid(shape): Transform matrix from direct lattice to yee grid. """ - ind_yee = np.arange(np.prod(shape[0:2])).reshape(shape[0:2], order='F') + ind_yee = np.arange(np.prod(shape[:2])).reshape(shape[:2], order='F') ind_yee = np.array( [[v for v in val @@ -325,11 +328,13 @@ def direct_lattice_to_yee_grid(shape): for _ in (0, 1)]) # This doubles every element in the x and y direction. 
ind_yee = np.stack( - [ind_yee + iz * np.prod(shape[0:2]) for iz in range(shape[2])], axis=2) - ind_yee_direct = np.arange(4 * np.prod(shape[0:2])).reshape( - 2 * np.array(shape[0:2]), order='F') + [ind_yee + iz * np.prod(shape[:2]) for iz in range(shape[2])], axis=2 + ) + ind_yee_direct = np.arange(4 * np.prod(shape[:2])).reshape( + 2 * np.array(shape[:2]), order='F' + ) ind_yee_direct = np.stack(shape[2] * [ind_yee_direct], axis=2) - direct_size = 4 * np.prod(shape[0:2]) + direct_size = 4 * np.prod(shape[:2]) # x-grid ind_direct_shift = np.roll( @@ -395,11 +400,13 @@ def direct_lattice(grid_shape, design_bounds, get_design_area: bool = False): indexing='ij') mapping = np.ravel_multi_index( (ind0.flatten(order='F'), ind1.flatten(order='F')), - dims=2 * np.array(grid_shape[0:2]), - order='F') + dims=2 * np.array(grid_shape[:2]), + order='F', + ) struct2direct = scipy.sparse.csr_matrix( (np.ones(len(mapping)), (mapping, np.arange(len(mapping)))), - shape=(4 * np.prod(grid_shape[0:2]), np.prod(design_area))) + shape=(4 * np.prod(grid_shape[:2]), np.prod(design_area)), + ) # Calculate the transformation from structure to yee. direct2yee = direct_lattice_to_yee_grid(grid_shape) @@ -409,9 +416,7 @@ def direct_lattice(grid_shape, design_bounds, get_design_area: bool = False): struct2yee = direct2yee @ struct2direct - if get_design_area: - return struct2yee, design_area - return struct2yee + return (struct2yee, design_area) if get_design_area else struct2yee def create_selection_matrix( diff --git a/spins/invdes/optimization/gradient_descent.py b/spins/invdes/optimization/gradient_descent.py index acd63b0..d122525 100644 --- a/spins/invdes/optimization/gradient_descent.py +++ b/spins/invdes/optimization/gradient_descent.py @@ -77,11 +77,7 @@ def optimize(self, iters=None, callback=None): if self._max_iters: stop_iter_list.append(self._max_iters) - # Compute stopping iteration. - stop_iter = None - if stop_iter_list: - stop_iter = np.min(stop_iter_list) - + stop_iter = np.min(stop_iter_list) if stop_iter_list else None while True: stop_opt = self.iterate() # Increase iteration count and break if done. diff --git a/spins/invdes/optimization/problems.py b/spins/invdes/optimization/problems.py index f109425..3aa3593 100644 --- a/spins/invdes/optimization/problems.py +++ b/spins/invdes/optimization/problems.py @@ -198,10 +198,7 @@ def build_constrained_ellipsoidal_problem(): def build_constrained_problem_list(): # Construct a list of "standard" constrained problems. 
- prob = [] - prob.append(build_constrained_ellipsoidal_problem()) - for i in range(2): - prob.append(build_constrained_linear_problem(i)) - for i in range(3): - prob.append(build_constrained_quadratic_problem(i)) + prob = [build_constrained_ellipsoidal_problem()] + prob.extend(build_constrained_linear_problem(i) for i in range(2)) + prob.extend(build_constrained_quadratic_problem(i) for i in range(3)) return prob diff --git a/spins/invdes/optimization/scipy_optimizer.py b/spins/invdes/optimization/scipy_optimizer.py index 94dd2a7..fe64628 100644 --- a/spins/invdes/optimization/scipy_optimizer.py +++ b/spins/invdes/optimization/scipy_optimizer.py @@ -41,10 +41,9 @@ def __init__(self, self.tol = tol # Spins places tols in options, tol saved in self.tol and removed from self.options - if options: - if "tol" in options.keys(): - self.tol = options["tol"] - del self.options["tol"] + if options and "tol" in options: + self.tol = options["tol"] + del self.options["tol"] def __call__(self, opt, param, callback=None): if isinstance(opt, OptimizationFunction): diff --git a/spins/invdes/optimization/test_constrained_optimizer.py b/spins/invdes/optimization/test_constrained_optimizer.py index 76e1fc6..d458bff 100644 --- a/spins/invdes/optimization/test_constrained_optimizer.py +++ b/spins/invdes/optimization/test_constrained_optimizer.py @@ -39,25 +39,25 @@ def test_lagrangian_optimization(opt, param, ans): # prob1 opt, param, ans = problems.build_constrained_ellipsoidal_problem() out_param = optimizer(opt, param) - print(str(ans) + ' ' + str(out_param.get_structure())) + print(f'{ans} {out_param.get_structure()}') # prob2 opt, param, ans = problems.build_constrained_linear_problem(0) out_param = optimizer(opt, param) - print(str(ans) + ' ' + str(out_param.get_structure())) + print(f'{ans} {out_param.get_structure()}') # prob3 opt, param, ans = problems.build_constrained_linear_problem(1) out_param = optimizer(opt, param) - print(str(ans) + ' ' + str(out_param.get_structure())) + print(f'{ans} {out_param.get_structure()}') # prob4 opt, param, ans = problems.build_constrained_quadratic_problem(0) out_param = optimizer(opt, param) - print(str(ans) + ' ' + str(out_param.get_structure())) + print(f'{ans} {out_param.get_structure()}') # prob5 opt, param, ans = problems.build_constrained_quadratic_problem(1) out_param = optimizer(opt, param) - print(str(ans) + ' ' + str(out_param.get_structure())) + print(f'{ans} {out_param.get_structure()}') # prob6 opt, param, ans = problems.build_constrained_quadratic_problem(2) out_param = optimizer(opt, param) - print(str(ans) + ' ' + str(out_param.get_structure())) + print(f'{ans} {out_param.get_structure()}') print(time.time() - start) diff --git a/spins/invdes/parametrization/cubic_utils.py b/spins/invdes/parametrization/cubic_utils.py index 5ee633e..62e872f 100644 --- a/spins/invdes/parametrization/cubic_utils.py +++ b/spins/invdes/parametrization/cubic_utils.py @@ -180,8 +180,8 @@ def MakeXYcubic(x_vector: np.array, y_vector: np.array) -> (sparse.coo.coo_matri xy_x_diag_v = [] xy_y_diag_v = [] - for i in range(0, 4): - for j in range(0, 4): + for i in range(4): + for j in range(4): xy = x[i] * y[j] xy_x = x_x[i] * y[j] xy_y = x[i] * y_y[j] @@ -234,8 +234,8 @@ def MakeDXDY_inv(dx_vector: np.array, diff_x_diff_y = np.array([]) - for i in range(0, 4): - for j in range(0, 4): + for i in range(4): + for j in range(4): diff_x_diff_y = np.append(diff_x_diff_y, diff_y[i] * diff_x[j]) return 
sparse.diags(diff_x_diff_y, 0, shape=(16 * n_p, 16 * n_p)) @@ -312,8 +312,8 @@ def MakeXYcubic_secondDerivative( xy_xx_diag_v = [] xy_yy_diag_v = [] - for i in range(0, 4): - for j in range(0, 4): + for i in range(4): + for j in range(4): xy = x[i] * y[j] xy_x = x_x[i] * y[j] xy_y = x[i] * y_y[j] @@ -435,8 +435,7 @@ def Phi2fii( periodicity_matrix = sparse.eye( (len(xq_vector) - periodicity[0]) * (len(yq_vector) - periodicity[1])) if periodicity[0] == 1: - shp = (len(xq_vector), - len(yq_vector[0:len(yq_vector) - periodicity[1]])) + shp = len(xq_vector), len(yq_vector[:len(yq_vector) - periodicity[1]]) periodicity_matrix = duplicate_boundary_data(shp, 0)[0] @ periodicity_matrix if periodicity[1] == 1: @@ -506,11 +505,11 @@ def makeDxmatrix(m_size: np.array) -> sparse.csc.csc_matrix: ind_i = [] ind_j = [] values = [] - for i in range(0, m_size[0]): - for j in range(0, m_size[1]): + for i in range(m_size[0]): + for j in range(m_size[1]): ind = np.ravel_multi_index([i, j], m_size, order="F") + ind_i.extend([ind]) if i == m_size[0] - 1: - ind_i.extend([ind]) ind_j.extend( [np.ravel_multi_index([i - 1, j], m_size, order="F")]) values.extend([-1]) @@ -518,7 +517,6 @@ def makeDxmatrix(m_size: np.array) -> sparse.csc.csc_matrix: ind_j.extend([np.ravel_multi_index([i, j], m_size, order="F")]) values.extend([1]) elif i == 0: - ind_i.extend([ind]) ind_j.extend([np.ravel_multi_index([i, j], m_size, order="F")]) values.extend([-1]) ind_i.extend([ind]) @@ -526,7 +524,6 @@ def makeDxmatrix(m_size: np.array) -> sparse.csc.csc_matrix: [np.ravel_multi_index([i + 1, j], m_size, order="F")]) values.extend([1]) else: - ind_i.extend([ind]) ind_j.extend( [np.ravel_multi_index([i - 1, j], m_size, order="F")]) values.extend([-1 / 2]) @@ -543,11 +540,11 @@ def makeDymatrix(m_size: np.array) -> sparse.csc.csc_matrix: ind_i = [] ind_j = [] values = [] - for i in range(0, m_size[0]): - for j in range(0, m_size[1]): + for i in range(m_size[0]): + for j in range(m_size[1]): ind = np.ravel_multi_index([i, j], m_size, order="F") + ind_i.extend([ind]) if j == m_size[1] - 1: - ind_i.extend([ind]) ind_j.extend( [np.ravel_multi_index([i, j - 1], m_size, order="F")]) values.extend([-1]) @@ -555,7 +552,6 @@ def makeDymatrix(m_size: np.array) -> sparse.csc.csc_matrix: ind_j.extend([np.ravel_multi_index([i, j], m_size, order="F")]) values.extend([1]) elif j == 0: - ind_i.extend([ind]) ind_j.extend([np.ravel_multi_index([i, j], m_size, order="F")]) values.extend([-1]) ind_i.extend([ind]) @@ -563,7 +559,6 @@ def makeDymatrix(m_size: np.array) -> sparse.csc.csc_matrix: [np.ravel_multi_index([i, j + 1], m_size, order="F")]) values.extend([1]) else: - ind_i.extend([ind]) ind_j.extend( [np.ravel_multi_index([i, j - 1], m_size, order="F")]) values.extend([-1 / 2]) diff --git a/spins/invdes/parametrization/grating_parametrization.py b/spins/invdes/parametrization/grating_parametrization.py index c9bd71a..16cf48e 100644 --- a/spins/invdes/parametrization/grating_parametrization.py +++ b/spins/invdes/parametrization/grating_parametrization.py @@ -57,9 +57,9 @@ def __init__(self, """ # Validate that the number of edges are correct. if len(initial_value) % 2 == 1: - raise ValueError("The number of edges in the grating expected to " - "be even, got {} instead.".format( - len(initial_value))) + raise ValueError( + f"The number of edges in the grating expected to be even, got {len(initial_value)} instead." + ) # `self._edges` holds the state of the parametrization. The edges are # stored in ascending order. 
@@ -85,8 +85,8 @@ def get_structure(self) -> np.ndarray: """ # Compute the widths and the centers of the grating teeth i.e. regions # which have a value of 1. - widths = self._edges[1::2] - self._edges[0::2] - centers = 0.5 * (self._edges[1::2] + self._edges[0::2]) + widths = self._edges[1::2] - self._edges[::2] + centers = 0.5 * (self._edges[1::2] + self._edges[::2]) # Initialize the pixels in the structure to 0. pixel_vals = np.zeros(self._num_pixels) @@ -97,9 +97,7 @@ np.array([center - 0.5 * width, center + 0.5 * width]), self._x_coords) - if self._inverted: - return 1 - pixel_vals - return pixel_vals + return 1 - pixel_vals if self._inverted else pixel_vals def calculate_gradient(self) -> linalg.LinearOperator: """Compute the gradient of the structure `z` with respect to edges. @@ -201,7 +199,4 @@ def serialize(self) -> Dict: def deserialize(self, data: Dict) -> None: super().deserialize(data) - if "inverted" in data: - self._inverted = data["inverted"] - else: - self._inverted = False + self._inverted = data["inverted"] if "inverted" in data else False diff --git a/spins/invdes/parametrization/parametrization.py b/spins/invdes/parametrization/parametrization.py index d3905ab..cef4ac4 100644 --- a/spins/invdes/parametrization/parametrization.py +++ b/spins/invdes/parametrization/parametrization.py @@ -239,10 +239,7 @@ def set_k(self, k): def get_structure(self) -> np.ndarray: z_cubic = self.vec2f @ self.vector - if self.k: - return 1 / (1 + np.exp(-self.k * (2 * z_cubic - 1))) - else: - return z_cubic + return 1 / (1 + np.exp(-self.k * (2 * z_cubic - 1))) if self.k else z_cubic def calculate_gradient(self) -> np.ndarray: z_cubic = self.vec2f @ self.vector @@ -491,6 +488,6 @@ def fit2eps(self, eps_bg, S, eps): def callback(v): nonlocal iter_num iter_num += 1 - print('fit2eps-continous: ' + str(iter_num)) + print(f'fit2eps-continuous: {iter_num}') opt_cont(obj, self, callback=callback) diff --git a/spins/invdes/parametrization/test_parametrization.py b/spins/invdes/parametrization/test_parametrization.py index 11ebcc7..cb12958 100644 --- a/spins/invdes/parametrization/test_parametrization.py +++ b/spins/invdes/parametrization/test_parametrization.py @@ -101,9 +101,10 @@ def test_get_structure(self): val_k = 1 / (1 + np.exp(-par.k * (2 * val - 1))) structure = np.reshape(par.get_structure(), shp_f, order='F') np.testing.assert_allclose( - structure[0:shp_f[0] // 2], - np.flipud(structure[-shp_f[0] // 2 + 1:]), - rtol=1e-9) + structure[: shp_f[0] // 2], + np.flipud(structure[-shp_f[0] // 2 + 1 :]), + rtol=1e-9, + ) # sym 1 par = param.CubicParam( init_val, coarse_x, coarse_y, fine_x, fine_y, symmetry=[0, 1]) @@ -376,9 +377,10 @@ def test_get_structure(self): structure = np.reshape(par.get_structure(), shp_f, order='F') len_vec = 4 * shp_c[0] * shp_c[1] np.testing.assert_allclose( - structure[0:shp_f[0] // 2], - np.flipud(structure[-shp_f[0] // 2 + 1:]), - rtol=1e-9) + structure[: shp_f[0] // 2], + np.flipud(structure[-shp_f[0] // 2 + 1 :]), + rtol=1e-9, + ) # sym 1 par = param.HermiteParam( diff --git a/spins/invdes/problem/emobjective.py b/spins/invdes/problem/emobjective.py index 797e524..d0da4d9 100644 --- a/spins/invdes/problem/emobjective.py +++ b/spins/invdes/problem/emobjective.py @@ -35,10 +35,10 @@ def calculate_gradient(self, param: Parametrization) -> np.ndarray: total_df_dz = self.calculate_df_dz(efields, struct) # TODO(logansu): Cache gradient calculation. 
dz_dp = aslinearoperator(param.calculate_gradient()) - df_dp = np.conj(dz_dp.adjoint() @ np.conj( - total_df_dz + self.calculate_partial_df_dz(efields, struct))) - - return df_dp + return np.conj( + dz_dp.adjoint() + @ np.conj(total_df_dz + self.calculate_partial_df_dz(efields, struct)) + ) def calculate_df_dz(self, efields, struct): # Calculate df/dz using adjoint. @@ -49,9 +49,7 @@ def calculate_df_dz(self, efields, struct): d = self.adjoint_sim.simulate( struct, np.conj(partial_df_dx) / (-1j * self.sim.omega)) - total_df_dz = 2 * np.real(np.conj(np.transpose(d)) @ B) - - return total_df_dz + return 2 * np.real(np.conj(np.transpose(d)) @ B) def calculate_objective_function(self, parametrization): struct = parametrization.get_structure() @@ -164,8 +162,7 @@ def calculate_f(self, x, z): Cx = np.array(self.objective_C.H.dot(x)) Cx = np.squeeze(Cx) T = self.objective_T - f = np.abs(Cx - T) - return f + return np.abs(Cx - T) def get_phase(self, param: Parametrization): struct = param.get_structure() @@ -180,9 +177,7 @@ def get_overlap_norm2(self, param: Parametrization): struct = param.get_structure() efields = self.sim.simulate(struct) - # Calculate overlap. - overlap2 = np.abs(np.array(self.objective_C.H.dot(efields)))**2 - return overlap2 + return np.abs(np.array(self.objective_C.H.dot(efields)))**2 def get_electric_fields(self, param: Parametrization): return fdfd_tools.unvec( @@ -287,8 +282,7 @@ def calculate_f(self, x, z): beta = self.objective_beta f0 = ((overlap < alpha) * (alpha - overlap) + (beta < overlap) * (overlap - beta)) - f = np.sum(f0**self.pf) - return f + return np.sum(f0**self.pf) def get_phase(self, param: Parametrization): struct = param.get_structure() @@ -305,9 +299,7 @@ def get_overlap_norm2(self, param: Parametrization): struct = param.get_structure() efields = self.sim.simulate(struct) - # Calculate overlap. - overlap2 = np.abs(np.array(self.objective_C.H.dot(efields)))**2 - return overlap2 + return np.abs(np.array(self.objective_C.H.dot(efields)))**2 def get_electric_fields(self, param: Parametrization): return fdfd_tools.unvec( @@ -403,8 +395,7 @@ def calculate_f(self, x, z): Cx = np.squeeze(Cx) T_complex = self.objective_T_complex f0 = np.abs(Cx - T_complex) - f = np.sum(f0**self.pf) - return f + return np.sum(f0**self.pf) def get_phase(self, param: Parametrization): struct = param.get_structure() @@ -421,9 +412,7 @@ def get_overlap_norm2(self, param: Parametrization): struct = param.get_structure() efields = self.sim.simulate(struct) - # Calculate overlap. 
- overlap2 = np.abs(np.array(self.objective_C.H.dot(efields)))**2 - return overlap2 + return np.abs(np.array(self.objective_C.H.dot(efields)))**2 def get_electric_fields(self, param: Parametrization): return fdfd_tools.unvec( diff --git a/spins/invdes/problem/farfield.py b/spins/invdes/problem/farfield.py index 0286128..f6a3bde 100644 --- a/spins/invdes/problem/farfield.py +++ b/spins/invdes/problem/farfield.py @@ -132,9 +132,7 @@ def make_near2farfield_matrix(points: np.array, rm_radial = sparse.bmat([[zeros, zeros, zeros], [zeros, Id, zeros], [zeros, zeros, Id]]) - t = rm_radial @ t_sp - - return t + return rm_radial @ t_sp def make_near2farfield_box_matrix(points: np.array, @@ -353,14 +351,14 @@ def move2H_matrix(axis: int, av_Hx = sparse.eye(n) av_Hy = fwY @ bwX av_Hz = fwZ @ bwX - if axis == 1: + elif axis == 1: av_Ex = fwZ av_Ey = fwZ @ fwX @ bwY av_Ez = fwX av_Hx = fwX @ bwY av_Hy = sparse.eye(n) av_Hz = fwZ @ bwY - if axis == 2: + elif axis == 2: av_Ex = fwY av_Ey = fwX av_Ez = fwY @ fwX @ bwZ @@ -418,13 +416,13 @@ def make_fourier_matrix(x: np.array, y: np.array, z: np.array, d_area: np.array, zeros = sparse.csr_matrix( (single_fourier_matrix.shape[0], single_fourier_matrix.shape[1]), dtype=float) - fourier_matrix = sparse.vstack([ - sparse.hstack([single_fourier_matrix, zeros, zeros]), - sparse.hstack([zeros, single_fourier_matrix, zeros]), - sparse.hstack([zeros, zeros, single_fourier_matrix]) - ]) - - return fourier_matrix + return sparse.vstack( + [ + sparse.hstack([single_fourier_matrix, zeros, zeros]), + sparse.hstack([zeros, single_fourier_matrix, zeros]), + sparse.hstack([zeros, zeros, single_fourier_matrix]), + ] + ) def make_sphere_point(interpolation_count: int) -> (np.array, np.array): @@ -449,7 +447,7 @@ def make_sphere_point(interpolation_count: int) -> (np.array, np.array): triangles = np.array([[0, 1, 4], [1, 2, 4], [2, 3, 4], [3, 0, 4], [0, 1, 5], [1, 2, 5], [2, 3, 5], [3, 0, 5]]) # refine mesh on sphere - for n in range(interpolation_count): + for _ in range(interpolation_count): for i in range(triangles.shape[0]): # Make points on the edges of every triangle p1 = points[triangles[i, 0]] + points[triangles[i, 1]] @@ -489,10 +487,11 @@ def make_sphere_point(interpolation_count: int) -> (np.array, np.array): [triangles[i, 1], index1, index2], [triangles[i, 2], index3, index2]]) # add the new triangles to the triangle list - if i == 0: - triangles_temp = new_triangles - else: - triangles_temp = np.vstack([triangles_temp, new_triangles]) + triangles_temp = ( + new_triangles + if i == 0 + else np.vstack([triangles_temp, new_triangles]) + ) # update the triangle list triangles = triangles_temp @@ -522,7 +521,7 @@ def make_half_sphere_point(interpolation_count: int, [0, 0, polarity]]) triangles = np.array([[0, 1, 4], [1, 2, 4], [2, 3, 4], [3, 0, 4]]) # refine mesh on sphere - for n in range(interpolation_count): + for _ in range(interpolation_count): for i in range(triangles.shape[0]): # Make points on the edges of every triangle p1 = points[triangles[i, 0]] + points[triangles[i, 1]] @@ -556,10 +555,11 @@ def make_half_sphere_point(interpolation_count: int, [triangles[i, 1], index1, index2], [triangles[i, 2], index3, index2]]) # add the new triangles to the triangle list - if i == 0: - triangles_temp = new_triangles - else: - triangles_temp = np.vstack([triangles_temp, new_triangles]) + triangles_temp = ( + new_triangles + if i == 0 + else np.vstack([triangles_temp, new_triangles]) + ) # update the triangle list triangles = triangles_temp @@ -611,9 +611,7 @@ def 
cart2spheric_matrix(x: np.array, y: np.array, z: np.array, t21 = sparse.csr_matrix(sp.sparse.diags(t_3d[2, 1] * np.ones_like(x))) t22 = sparse.csr_matrix(sp.sparse.diags(t_3d[2, 2] * np.ones_like(x))) - t = sparse.bmat([[t00, t01, t02], [t10, t11, t12], [t20, t21, t22]]) - - return t + return sparse.bmat([[t00, t01, t02], [t10, t11, t12], [t20, t21, t22]]) def spheric2cart_matrix(r: np.array, th: np.array, ph: np.array, @@ -649,9 +647,7 @@ def spheric2cart_matrix(r: np.array, th: np.array, ph: np.array, t21 = sparse.csr_matrix(sp.sparse.diags(t_3d[2, 1] * np.ones_like(x))) t22 = sparse.csr_matrix(sp.sparse.diags(t_3d[2, 2] * np.ones_like(x))) - t = sparse.bmat([[t00, t01, t02], [t10, t11, t12], [t20, t21, t22]]) - - return t + return sparse.bmat([[t00, t01, t02], [t10, t11, t12], [t20, t21, t22]]) def spheric2spheric_matrix(r: np.array, @@ -667,9 +663,7 @@ def spheric2spheric_matrix(r: np.array, sp2cart = spheric2cart_matrix(r, th, ph, initial_axis) cart2sp = cart2spheric_matrix(x, y, z, new_axis) - t = cart2sp @ sp2cart - - return t + return cart2sp @ sp2cart # Functions needed to plot the far field @@ -731,9 +725,7 @@ def points2triangles_averaging_matrix(points: np.ndarray, colind = flatten(triangles) data = 1 / 3 * np.ones_like(rowind) - av_matrix = sparse.csr_matrix((data, (rowind, colind))) - - return av_matrix + return sparse.csr_matrix((data, (rowind, colind))) def area_selection_vector(points: np.ndarray, triangles: np.ndarray, @@ -793,11 +785,15 @@ def triangle_area_vector(points: np.ndarray, p0 = points[triangles[:, 1]] - points[triangles[:, 0]] p1 = points[triangles[:, 2]] - points[triangles[:, 0]] - area = 0.5 * ((p0[:, 1] * p1[:, 2] - p0[:, 2] * p1[:, 1])**2 + - (p0[:, 2] * p1[:, 0] - p0[:, 0] * p1[:, 2])**2 + - (p0[:, 0] * p1[:, 1] - p0[:, 1] * p1[:, 0])**2)**0.5 - - return area + return ( + 0.5 + * ( + (p0[:, 1] * p1[:, 2] - p0[:, 2] * p1[:, 1]) ** 2 + + (p0[:, 2] * p1[:, 0] - p0[:, 0] * p1[:, 2]) ** 2 + + (p0[:, 0] * p1[:, 1] - p0[:, 1] * p1[:, 0]) ** 2 + ) + ** 0.5 + ) # main for quick test diff --git a/spins/invdes/problem/ffobjective.py b/spins/invdes/problem/ffobjective.py index aa2ae10..1f64cf6 100644 --- a/spins/invdes/problem/ffobjective.py +++ b/spins/invdes/problem/ffobjective.py @@ -95,29 +95,19 @@ def _compute_objective(self, FF_cond): self.FarField = np.ones(3 * n_points) def calculate_partial_df_dx(self, x, z): - if self.E_background is not None: - E = x - E_background - else: - E = x - + E = x - self.E_background if self.E_background is not None else x self.FarField = self.FF_projection_matrix @ E - ddirected_power_viol_dx = 0.5 * self.directed_power_vector @ \ scipy.sparse.diags(np.conj(self.FarField)) @ \ self.FF_projection_matrix - - return ddirected_power_viol_dx + return ( 0.5 * self.directed_power_vector @ scipy.sparse.diags(np.conj(self.FarField)) @ self.FF_projection_matrix ) def calculate_f(self, x, z): - if self.E_background is not None: - E = x - E_background - else: - E = x - + E = x - self.E_background if self.E_background is not None else x self.FarField = self.FF_projection_matrix @ x - directed_power = 0.5 * self.directed_power_vector @ np.abs( self.FarField)**2 - - return directed_power + return 0.5 * self.directed_power_vector @ np.abs(self.FarField) ** 2 def get_electric_fields(self, param: Parametrization): # TODO(logansu): Refactor please @@ -194,16 +184,12 @@ def _compute_objective(self, FF_cond): self.FarField = np.ones(3 * n_p) def calculate_partial_df_dx(self, x, z): - if self.E_background is not None: - E = x - E_background - 
diff --git a/spins/invdes/problem/ffobjective.py b/spins/invdes/problem/ffobjective.py
index aa2ae10..1f64cf6 100644
--- a/spins/invdes/problem/ffobjective.py
+++ b/spins/invdes/problem/ffobjective.py
@@ -95,29 +95,19 @@ def _compute_objective(self, FF_cond):
         self.FarField = np.ones(3 * n_points)
 
     def calculate_partial_df_dx(self, x, z):
-        if self.E_background is not None:
-            E = x - E_background
-        else:
-            E = x
-
+        E = x - self.E_background if self.E_background is not None else x
         self.FarField = self.FF_projection_matrix @ E
-        ddirected_power_viol_dx = 0.5 * self.directed_power_vector @ \
-            scipy.sparse.diags(np.conj(self.FarField)) @ \
-            self.FF_projection_matrix
-
-        return ddirected_power_viol_dx
+        return (
+            0.5
+            * self.directed_power_vector
+            @ scipy.sparse.diags(np.conj(self.FarField))
+            @ self.FF_projection_matrix
+        )
 
     def calculate_f(self, x, z):
-        if self.E_background is not None:
-            E = x - E_background
-        else:
-            E = x
-
+        E = x - self.E_background if self.E_background is not None else x
         self.FarField = self.FF_projection_matrix @ E
-        directed_power = 0.5 * self.directed_power_vector @ np.abs(
-            self.FarField)**2
-
-        return directed_power
+        return 0.5 * self.directed_power_vector @ np.abs(self.FarField) ** 2
 
     def get_electric_fields(self, param: Parametrization):
         # TODO(logansu): Refactor please
@@ -194,16 +184,12 @@ def _compute_objective(self, FF_cond):
         self.FarField = np.ones(3 * n_p)
 
     def calculate_partial_df_dx(self, x, z):
-        if self.E_background is not None:
-            E = x - E_background
-        else:
-            E = x
-
+        E = x - self.E_background if self.E_background is not None else x
         #calculate far field
         FF_mat = self.FF_projection_matrix
         FarField = FF_mat @ E
         dFarField_square_viol_dx = scipy.sparse.diags(np.conj(FarField)) @ \
-            FF_mat
+                FF_mat
         #calculate total and directed scattered power
         n_points = self.points.size
         sum_matrix = scipy.sparse.hstack([
@@ -214,29 +200,24 @@ def calculate_partial_df_dx(self, x, z):
         scattered_power = self.scattered_power_vector @ np.abs(FarField)**2
         directed_power = self.directed_power_vector @ np.abs(FarField)**2
         dscattered_power_viol_dx = self.scattered_power_vector @ \
-            dFarField_square_viol_dx
+                dFarField_square_viol_dx
         ddirected_power_viol_dx = self.directed_power_vector @ \
-            dFarField_square_viol_dx
+                dFarField_square_viol_dx
         #calculate directivity
         directivity = directed_power / scattered_power
         ddirectivity_viol_dx = (
             ddirected_power_viol_dx*scattered_power - \
-            directed_power*dscattered_power_viol_dx)/ \
-            scattered_power**2
+                directed_power*dscattered_power_viol_dx)/ \
+                scattered_power**2
         #objective function
         alpha = self.objective_alpha
         f0 = (directivity < alpha) * (alpha - directivity)
         df0_viol_dx = (directivity < alpha) * (-1) * ddirectivity_viol_dx
-        df_viol_dx = self.pf * f0**(self.pf - 1) * df0_viol_dx
-
-        return df_viol_dx
+        return self.pf * f0**(self.pf - 1) * df0_viol_dx
 
     def calculate_f(self, x, z):
         #Test for background
-        if self.E_background is not None:
-            E = x - E_background
-        else:
-            E = x
+        E = x - self.E_background if self.E_background is not None else x
 
         #calculate far field
         self.FarField = self.FF_projection_matrix @ E
@@ -250,9 +231,7 @@ def calculate_f(self, x, z):
         #objective function
         alpha = self.objective_alpha
         f0 = (self.directivity < alpha) * (alpha - self.directivity)
-        f = np.sum(f0**self.pf)
-
-        return f
+        return np.sum(f0**self.pf)
 
     def get_electric_fields(self, param: Parametrization):
         # TODO(logansu): Refactor please
diff --git a/spins/invdes/problem/graph_executor.py b/spins/invdes/problem/graph_executor.py
index f214d71..7d3521e 100644
--- a/spins/invdes/problem/graph_executor.py
+++ b/spins/invdes/problem/graph_executor.py
@@ -87,9 +87,7 @@ def eval_fun(
     _eval_fun_vals(fun_vals, fun_map, graph, top_sorted_nodes, param)
 
     ret_vals = [fun_vals[out_node] for out_node in out_nodes]
-    if single_output:
-        return ret_vals[0]
-    return ret_vals
+    return ret_vals[0] if single_output else ret_vals
 
 
 def _eval_fun_vals(fun_vals: Dict[NodeId, np.ndarray], fun_map: FunctionMap,
@@ -256,11 +254,8 @@ def _eval_grad(node, input_vals, grad_val):
     node = next(node_iter)
     while True:
-        while True:
-            # Postpone any heavy computation.
-            if _is_heavy_fun(fun_map[node]):
-                break
-
+        # Postpone any heavy computation.
+        while not _is_heavy_fun(fun_map[node]):
             if _is_old_fun(fun_map[node]):
                 old_grad = fun_map[node].calculate_gradient(param)
                 # Use `np.dot` since either operand could be a scalar
@@ -349,7 +343,7 @@ def _create_computational_graph(
     # To identify functions, we use `id`, which is guaranteed to be unique for
    # two different objects (for CPython, this is simply the memory address).
     out_nodes = [id(fun) for fun in fun_list]
-    fun_map = {node: fun for node, fun in zip(out_nodes, fun_list)}
+    fun_map = dict(zip(out_nodes, fun_list))
 
     # Set of functions that do not have inputs.
in_nodes = set() @@ -384,7 +378,7 @@ def _create_computational_graph( graph[node].append(next_fun_id) - if next_fun_id in fun_map.keys(): + if next_fun_id in fun_map: continue fun_map[next_fun_id] = next_fun diff --git a/spins/invdes/problem/grating.py b/spins/invdes/problem/grating.py index be0843d..04efaed 100644 --- a/spins/invdes/problem/grating.py +++ b/spins/invdes/problem/grating.py @@ -52,8 +52,7 @@ def __init__(self, min_feature: float, described above. """ if min_feature < 0: - raise ValueError( - "Minimum feature must be positive, got {}".format(min_feature)) + raise ValueError(f"Minimum feature must be positive, got {min_feature}") self._min_feature = min_feature self._edge_cons_scale = boundary_constraint_scale @@ -78,14 +77,10 @@ def calculate_gradient( # parametrizations are currently handled as a single composite # parametrization. if isinstance(param, parametrization.CompositeParam): - # The gradient is block-diagonal, with one block per - # sub-parametrization. - grad_blocks = [] - - for subparam in param._params: - grad_blocks.append( - self._build_constraint_grads(subparam.to_vector())) - + grad_blocks = [ + self._build_constraint_grads(subparam.to_vector()) + for subparam in param._params + ] return scipy.linalg.block_diag(*grad_blocks) return self._build_constraint_grads(param.to_vector()) @@ -105,10 +100,12 @@ def _build_constraints(self, vec: np.ndarray, Returns: List of constraints. """ - constraints = [] - # The minimum feature constraint. - constraints.append(-(_diff_matrix(len(vec)) @ vec - - np.ones(len(vec) - 1) * self._min_feature)) + constraints = [ + -( + _diff_matrix(len(vec)) @ vec + - np.ones(len(vec) - 1) * self._min_feature + ) + ] # Lower bounding the left and right edges. Note that we keep them at # least 2 times the minimum feature constraint because the selection # matrix at the edges may not be aligned to the grid. @@ -128,10 +125,7 @@ def _build_constraint_grads(self, vec: np.ndarray) -> List[np.ndarray]: Returns: List of constraint gradients. """ - constraints = [] - - # The minimum feature constraint. - constraints.append(-_diff_matrix(len(vec))) + constraints = [-_diff_matrix(len(vec))] lower_bound = np.zeros(len(vec)) lower_bound[0] = -1 diff --git a/spins/invdes/problem/objective.py b/spins/invdes/problem/objective.py index 028924a..6154b64 100644 --- a/spins/invdes/problem/objective.py +++ b/spins/invdes/problem/objective.py @@ -499,7 +499,7 @@ def grad(self, input_vals: List[np.ndarray], return [grad_res] def __str__(self): - return "p[" + str(self._index) + "]" + return f"p[{str(self._index)}]" class Sum(OptimizationFunction): @@ -521,8 +521,7 @@ def __init__(self, if weights is not None: self.objectives = [] - for i, obj in enumerate(objectives): - self.objectives.append(weights[i] * obj) + self.objectives.extend(weights[i] * obj for i, obj in enumerate(objectives)) else: self.objectives = objectives @@ -553,7 +552,7 @@ def __add__(self, obj): def __str__(self): string = "(" + " + ".join(str(obj) for obj in self.objectives) + ")" if self.parallelize: - string = "||" + string + string = f"||{string}" return string @@ -610,7 +609,7 @@ def __str__(self): # is kept in one Product object. 
string = "({0})".format(" * ".join(str(obj) for obj in self.objectives)) if self.parallelize: - string = "||" + string + string = f"||{string}" return string @@ -637,7 +636,7 @@ def grad(self, input_vals: List[np.ndarray], return [grad_val * self.power * input_vals[0]**(self.power - 1)] def __str__(self): - return str(self.obj) + "**" + str(self.power) + return f"{str(self.obj)}**{str(self.power)}" # TODO(logansu): Fix gradients here. The fundamental issue is that for @@ -670,7 +669,7 @@ def grad(self, input_vals: List[np.ndarray], return [grad_val * grad] def __str__(self): - return "abs({})".format(self._inputs[0]) + return f"abs({self._inputs[0]})" class IndicatorPlus(OptimizationFunction): @@ -821,9 +820,10 @@ def grad(self, input_vals: List[np.ndarray], grad_val: np.ndarray) -> List[np.ndarray]: max_val = np.max(input_vals) denom = np.sum(np.exp(np.array(input_vals) - max_val)) - grads = [] - for i in range(len(self.objectives)): - grads.append(1 / denom * np.exp(input_vals[i] - max_val)) + grads = [ + 1 / denom * np.exp(input_vals[i] - max_val) + for i in range(len(self.objectives)) + ] return grad_val * np.array(grads) def __str__(self): diff --git a/spins/invdes/problem/selection_matrix.py b/spins/invdes/problem/selection_matrix.py index 38743e7..572afd9 100644 --- a/spins/invdes/problem/selection_matrix.py +++ b/spins/invdes/problem/selection_matrix.py @@ -76,10 +76,11 @@ def average_2xFarEdge(grid_size, design_bounds): Sz_flat = scipy.sparse.vstack((zero_padding_t, Sy_flat_preshift, zero_padding_b)) - # Repmat to make a full 3D grid. - S = scipy.sparse.vstack([Sx_flat] * grid_size[2] + - [Sy_flat] * grid_size[2] + [Sz_flat] * grid_size[2]) - return S + return scipy.sparse.vstack( + [Sx_flat] * grid_size[2] + + [Sy_flat] * grid_size[2] + + [Sz_flat] * grid_size[2] + ) def direct_lattice(grid_size, @@ -114,7 +115,7 @@ def direct_lattice(grid_size, x_ind = x_ind.flatten(order='F') y_ind = y_ind.flatten(order='F') - num_gridxy = np.prod(grid_size[0:2]) + num_gridxy = np.prod(grid_size[:2]) num_design = np.prod(2 * design_area) mode = ['wrap', 'clip'] @@ -122,11 +123,12 @@ def direct_lattice(grid_size, map_x = np.ravel_multi_index( [ design_bounds[0][0] + (x_ind + 1) // 2 - 1, - design_bounds[0][1] + y_ind // 2 + design_bounds[0][1] + y_ind // 2, ], - grid_size[0:2], + grid_size[:2], order='F', - mode=mode[wrap_or_clip[0]]) + mode=mode[wrap_or_clip[0]], + ) Sx_flat = scipy.sparse.csr_matrix( (np.ones(len(map_x)), (map_x, np.array(np.arange(len(map_x))))), shape=(num_gridxy, num_design)) @@ -135,11 +137,12 @@ def direct_lattice(grid_size, map_y = np.ravel_multi_index( [ design_bounds[0][0] + x_ind // 2, - design_bounds[0][1] + (y_ind + 1) // 2 - 1 + design_bounds[0][1] + (y_ind + 1) // 2 - 1, ], - grid_size[0:2], + grid_size[:2], order='F', - mode=mode[wrap_or_clip[1]]) + mode=mode[wrap_or_clip[1]], + ) Sy_flat = scipy.sparse.csr_matrix( (np.ones(len(map_y)), (map_y, np.array(np.arange(len(map_y))))), shape=(num_gridxy, num_design)) @@ -147,9 +150,10 @@ def direct_lattice(grid_size, # Z-grid map_z = np.ravel_multi_index( [design_bounds[0][0] + x_ind // 2, design_bounds[0][1] + y_ind // 2], - grid_size[0:2], + grid_size[:2], order='F', - mode=mode[wrap_or_clip[2]]) + mode=mode[wrap_or_clip[2]], + ) Sz_flat = scipy.sparse.csr_matrix( (np.ones(len(map_z)), (map_z, np.array(np.arange(len(map_z))))), shape=(num_gridxy, num_design)) @@ -172,7 +176,4 @@ def normalize_rows(S): Sz = scipy.sparse.vstack([Sz_flat] * grid_size[2]) S = scipy.sparse.vstack((Sx, Sy, Sz)) - if get_design_area: - 
return S, 2 * design_area - else: - return S + return (S, 2 * design_area) if get_design_area else S diff --git a/spins/invdes/problem/simulation.py b/spins/invdes/problem/simulation.py index b384270..b306a17 100644 --- a/spins/invdes/problem/simulation.py +++ b/spins/invdes/problem/simulation.py @@ -54,11 +54,7 @@ def __init__(self, self.pmc = pmc self.pemc = pemc self.symmetry = symmetry - if bloch_vec is None: - self.bloch_vec = np.zeros(3) - else: - self.bloch_vec = bloch_vec - + self.bloch_vec = np.zeros(3) if bloch_vec is None else bloch_vec # For caching uses. self.cache = [None] * cache_size self.lock = threading.Lock() @@ -237,9 +233,7 @@ def _run_solver(self, z: np.ndarray, J: np.ndarray) -> np.ndarray: for k in range(3): new_J[k] /= np.conj(s[k]) new_J = fdfd_tools.vec(new_J) - mu = None - if self.sim.mu is not None: - mu = np.conj(fdfd_tools.vec(self.sim.mu)) + mu = np.conj(fdfd_tools.vec(self.sim.mu)) if self.sim.mu is not None else None sim_args = { 'omega': np.conj(self.sim.omega), 'dxes': dxes, diff --git a/spins/invdes/problem/slack_optimization_problem.py b/spins/invdes/problem/slack_optimization_problem.py index 101fdb2..6ce0233 100644 --- a/spins/invdes/problem/slack_optimization_problem.py +++ b/spins/invdes/problem/slack_optimization_problem.py @@ -35,15 +35,15 @@ def __init__(self, opt): """ cons_ineq = opt.get_inequality_constraints() self.num_slack = len(cons_ineq) - cons_eq = [] - # Perform shallow copy of equality constraints. - for eq in opt.get_equality_constraints(): - cons_eq.append(SlackRemover(eq, self.num_slack)) + cons_eq = [ + SlackRemover(eq, self.num_slack) + for eq in opt.get_equality_constraints() + ] # Convert inequality constraints into equalities. - for i, ineq in enumerate(cons_ineq): - cons_eq.append( - SlackRemover(ineq, self.num_slack) + - SlackVariable(self.num_slack, i)) + cons_eq.extend( + SlackRemover(ineq, self.num_slack) + SlackVariable(self.num_slack, i) + for i, ineq in enumerate(cons_ineq) + ) super().__init__( SlackRemover(opt.get_objective(), self.num_slack), cons_eq=cons_eq) diff --git a/spins/invdes/problem/structure_objectives.py b/spins/invdes/problem/structure_objectives.py index 9ce0e86..c0793f6 100644 --- a/spins/invdes/problem/structure_objectives.py +++ b/spins/invdes/problem/structure_objectives.py @@ -64,29 +64,27 @@ def calculate_objective_function(self, param): if self.method == 1: return self.weight_factor * param.calculate_gap_penalty( np.pi / (1.20 * self.d)) - if self.method == 2: - curv = self.weight_factor * param.calculate_curv_penalty( - np.pi / (1.15 * self.d)) - gap = self.weight_factor * param.calculate_gap_penalty( - np.pi / (1.20 * self.d)) - return curv + gap - else: + if self.method != 2: return self.weight_factor * param.calculate_gap_penalty( np.pi / (1.20 * self.d)) + curv = self.weight_factor * param.calculate_curv_penalty( + np.pi / (1.15 * self.d)) + gap = self.weight_factor * param.calculate_gap_penalty( + np.pi / (1.20 * self.d)) + return curv + gap def calculate_gradient(self, param): if self.method == 1: return self.weight_factor * param.calculate_gap_penalty_gradient( np.pi / (1.20 * self.d)) - if self.method == 2: - curv = self.weight_factor * param.calculate_curv_penalty_gradient( - np.pi / (1.15 * self.d)) - gap = self.weight_factor * param.calculate_gap_penalty_gradient( - np.pi / (1.20 * self.d)) - return curv + gap - else: + if self.method != 2: return self.weight_factor * param.calculate_gap_penalty_gradient( np.pi / (1.20 * self.d)) + curv = self.weight_factor * 
param.calculate_curv_penalty_gradient( + np.pi / (1.15 * self.d)) + gap = self.weight_factor * param.calculate_gap_penalty_gradient( + np.pi / (1.20 * self.d)) + return curv + gap def __str__(self): - return 'FabCon(' + str(self.d) + ')' + return f'FabCon({str(self.d)})' diff --git a/spins/invdes/problem/test_emobjective.py b/spins/invdes/problem/test_emobjective.py index 21eee60..70d4e78 100644 --- a/spins/invdes/problem/test_emobjective.py +++ b/spins/invdes/problem/test_emobjective.py @@ -35,9 +35,9 @@ def test_gradient(self): # Create a 3x3 2D grid to brute force check adjoint gradients. shape = [3, 3, 1] # Setup epsilon (pure vacuum). - epsilon = [np.ones(shape) for i in range(3)] + epsilon = [np.ones(shape) for _ in range(3)] # Setup dxes. Assume dx = 40. - dxes = [[np.ones(shape[i]) * 40 for i in range(3)] for j in range(2)] + dxes = [[np.ones(shape[i]) * 40 for i in range(3)] for _ in range(2)] # Setup a point source in the center. J = [np.zeros(shape) for i in range(3)] J[2][1, 1, 0] = 1 diff --git a/spins/invdes/problem/test_graph_executor.py b/spins/invdes/problem/test_graph_executor.py index 15f65e7..6f6206f 100644 --- a/spins/invdes/problem/test_graph_executor.py +++ b/spins/invdes/problem/test_graph_executor.py @@ -163,14 +163,14 @@ def test_top_sort_affinity(): "g": ["h"], "h": [] } - affinity_nodes = set(["b", "g", "f"]) + affinity_nodes = {"b", "g", "f"} sorted_nodes = graph_executor._top_sort_affinity(graph, affinity_nodes) # The topological ordering must be a[cde][bf]gh where square brackets denote # that any combination is acceptable. assert sorted_nodes[0] == "a" - assert set(sorted_nodes[1:4]) == set(["c", "d", "e"]) - assert set(sorted_nodes[4:6]) == set(["b", "f"]) + assert set(sorted_nodes[1:4]) == {"c", "d", "e"} + assert set(sorted_nodes[4:6]) == {"b", "f"} assert sorted_nodes[6] == "g" assert sorted_nodes[7] == "h" diff --git a/spins/invdes/problem/test_objective.py b/spins/invdes/problem/test_objective.py index 877b39a..c346981 100644 --- a/spins/invdes/problem/test_objective.py +++ b/spins/invdes/problem/test_objective.py @@ -276,15 +276,6 @@ def test_gradient(self): def test_matrix(self): """ Test sum calculation for matrix multiplication. """ - # TODO(logansu): Enable test. - if False: - # Sum 1x1 variable against 2x1 constant. - param = DirectParam([1, 2]) - obj = objective.Sum([ValueSlice(1), Constant(np.array([1, 2]))]) - np.testing.assert_array_equal( - obj.calculate_objective_function(param), [2, 4]) - np.testing.assert_array_equal( - obj.calculate_gradient(param), [[0, 2], [0, 4]]) def test_weighted_sum(self): param = DirectParam([1, 2, 3]) @@ -380,15 +371,6 @@ def test_gradient(self): def test_matrix(self): """ Test product calculation for matrix multiplication. """ - # TODO(logansu): Enable test. - # Multiply 1x1 variable against 2x1 constant. - if False: - param = DirectParam([1, 2]) - obj = objective.Product([ValueSlice(1), Constant(np.array([1, 2]))]) - np.testing.assert_array_equal( - obj.calculate_objective_function(param), [2, 4]) - np.testing.assert_array_equal( - obj.calculate_gradient(param), [[0, 2], [0, 4]]) def test_string(self): obj = objective.Product( @@ -938,7 +920,7 @@ def grad(self, input_vals, grad_val): return self.value def __str__(self): - return 'dummy(' + str(self.value) + ')' + return f'dummy({str(self.value)})' def test_sum(self): # Test that sum operation works as intended. 
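The adjoint-gradient tests above (test_emobjective brute-forces a 3x3x1 vacuum grid) all reduce to comparing an analytic gradient against finite differences. A generic sketch of that check in plain NumPy (the function names here are illustrative, not the spins test API):

import numpy as np

def check_gradient(f, grad_f, x0, eps=1e-6, tol=1e-4):
    """Brute-force comparison of an analytic gradient against central differences."""
    g_analytic = grad_f(x0)
    g_numeric = np.zeros_like(x0)
    for i in range(x0.size):
        step = np.zeros_like(x0)
        step[i] = eps
        # Central difference: (f(x + h) - f(x - h)) / (2h) per coordinate.
        g_numeric[i] = (f(x0 + step) - f(x0 - step)) / (2 * eps)
    return np.allclose(g_analytic, g_numeric, atol=tol)

# Example: f(x) = ||x||^2 has gradient 2x.
x0 = np.array([0.3, -1.2, 2.0])
assert check_gradient(lambda x: np.sum(x**2), lambda x: 2 * x, x0)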
diff --git a/spins/invdes/problem_graph/creator_em.py b/spins/invdes/problem_graph/creator_em.py index cfbd252..f8cf47b 100644 --- a/spins/invdes/problem_graph/creator_em.py +++ b/spins/invdes/problem_graph/creator_em.py @@ -141,9 +141,7 @@ def __call__( bloch_vector=kvector, ) - if self._params.overwrite_bloch_vector: - return source, kvector - return source + return (source, kvector) if self._params.overwrite_bloch_vector else source @optplan.register_node(optplan.GaussianSource) @@ -428,7 +426,7 @@ def _simulate_adjoint(self, eps: np.ndarray, return electric_fields def __str__(self): - return "Simulation({})".format(self._wlen) + return f"Simulation({self._wlen})" def _create_solver(solver_name: str, simspace: SimulationSpace) -> Callable: @@ -441,16 +439,16 @@ def _create_solver(solver_name: str, simspace: SimulationSpace) -> Callable: Returns: A callable solver object. """ - if solver_name == "maxwell_cg": - from spins.fdfd_solvers.maxwell import MaxwellSolver - solver = MaxwellSolver(simspace.dims, solver="CG") + if solver_name == "local_direct": + solver = DIRECT_SOLVER elif solver_name == "maxwell_bicgstab": from spins.fdfd_solvers.maxwell import MaxwellSolver solver = MaxwellSolver(simspace.dims, solver="biCGSTAB") - elif solver_name == "local_direct": - solver = DIRECT_SOLVER + elif solver_name == "maxwell_cg": + from spins.fdfd_solvers.maxwell import MaxwellSolver + solver = MaxwellSolver(simspace.dims, solver="CG") else: - raise ValueError("Unknown solver, got {}".format(solver_name)) + raise ValueError(f"Unknown solver, got {solver_name}") return solver @@ -539,7 +537,7 @@ def grad(self, input_vals: List[np.ndarray], ] def __str__(self): - return "Epsilon({})".format(self._wlen) + return f"Epsilon({self._wlen})" @optplan.register_node(optplan.Epsilon) @@ -625,7 +623,7 @@ def __call__(self, simspace: SimulationSpace, wlen: float = None, overlap_coords[2].size ]) singleton_dims = np.where(coord_dims == 1)[0] - if not singleton_dims.size == 0: + if singleton_dims.size != 0: for axis in singleton_dims: # The dx from the SPINS simulation grid is borrowed for the replication. dx = dxyz[axis][0] @@ -639,7 +637,7 @@ def __call__(self, simspace: SimulationSpace, wlen: float = None, overlap[comp] = np.repeat(overlap[comp], overlap_coords[axis].size, axis) - for i in range(0, 3): + for i in range(3): # Interpolate the user-specified overlap fields for use on the simulation grids overlap_interp_function = RegularGridInterpolator( @@ -709,7 +707,7 @@ def grad(self, input_vals: List[np.ndarray], return [grad_val * self.overlap_vector] def __str__(self): - return "Overlap({})".format(self._input) + return f"Overlap({self._input})" @optplan.register_node(optplan.Overlap) diff --git a/spins/invdes/problem_graph/creator_param.py b/spins/invdes/problem_graph/creator_param.py index fb5afcf..f1c7701 100644 --- a/spins/invdes/problem_graph/creator_param.py +++ b/spins/invdes/problem_graph/creator_param.py @@ -48,8 +48,9 @@ def create_grating_param( # Only one of the design areas is nonzero. Figure out which one. 
design_dims = work.get_object(params.simulation_space).design_dims if design_dims[0] > 1 and design_dims[1] > 1: - raise ValueError("Grating parametrization should have 1D design " - "area, got {}".format(design_dims)) + raise ValueError( + f"Grating parametrization should have 1D design area, got {design_dims}" + ) grating_len = np.max(design_dims) return parametrization.GratingParam([], @@ -128,8 +129,7 @@ def create_cubic_or_hermite_levelset( elif params.type == "parametrization.cubic": param_class = parametrization.CubicParam else: - raise ValueError("Unexpected parametrization type, got {}".format( - params.type)) + raise ValueError(f"Unexpected parametrization type, got {params.type}") return param_class(initial_value=init_val, coarse_x=coarse_x, @@ -238,7 +238,7 @@ def calculate_gradient(self, param) -> List[np.ndarray]: return gradient def __str__(self): - return 'FabCon(' + str(self.d_gap) + ')' + return f'FabCon({str(self.d_gap)})' @optplan.register_node(optplan.FabricationConstraint) diff --git a/spins/invdes/problem_graph/functions/poynting.py b/spins/invdes/problem_graph/functions/poynting.py index d1c9fbd..f198878 100644 --- a/spins/invdes/problem_graph/functions/poynting.py +++ b/spins/invdes/problem_graph/functions/poynting.py @@ -80,7 +80,7 @@ def __init__( # Create a filter that sets a 1 in every position that is included in # the computation of the Poynting vector. - filter_grid = [np.zeros(simspace.dims) for i in range(3)] + filter_grid = [np.zeros(simspace.dims) for _ in range(3)] filter_grid[self._axis][tuple(self._plane_slice)] = 1 self._filter_vec = fdfd_tools.vec(filter_grid) diff --git a/spins/invdes/problem_graph/grating.py b/spins/invdes/problem_graph/grating.py index 144056e..3f0b790 100644 --- a/spins/invdes/problem_graph/grating.py +++ b/spins/invdes/problem_graph/grating.py @@ -29,8 +29,9 @@ def __init__(self, param: parametrization.DirectParam, min_feature: float, # Calculate in terms of pixels. self._min_feature = min_feature / dx if self._min_feature < 1: - raise ValueError("Minimum feature size must be larger than one " - "grid spacing, got {}".format(min_feature)) + raise ValueError( + f"Minimum feature size must be larger than one grid spacing, got {min_feature}" + ) self._param = param @@ -49,11 +50,7 @@ def __call__(self, param: parametrization.GratingParam, # To remedy, prepend a rising edge at the left boundary and append a # falling edge at the end, EXCEPT if there is already an edge # already there. - if edge_loc[0] == 0: - edge_loc = edge_loc[1:] - else: - edge_loc = [0] + edge_loc - + edge_loc = edge_loc[1:] if edge_loc[0] == 0 else [0] + edge_loc grating_len = len(self._param.to_vector()) if edge_loc[-1] == grating_len: edge_loc = edge_loc[:-1] @@ -124,7 +121,7 @@ def _get_edge_loc_dp(x: List[float], min_feature: float = 0) -> np.ndarray: # min_feature size in terms of grid units. 
d = int(min_feature * divisions) - struct_dp = [[] for i in range(d)] + struct_dp = [[] for _ in range(d)] # k = this right edge dp[0] = 0 @@ -176,6 +173,4 @@ def _get_edge_loc_dp(x: List[float], min_feature: float = 0) -> np.ndarray: func(x[k_int], 1 - k_frac) * (1 - k_frac)) struct_best_ind = np.argmin(dp[d:]) + d - edge_loc = struct_dp[struct_best_ind] - - return edge_loc + return struct_dp[struct_best_ind] diff --git a/spins/invdes/problem_graph/log_tools/loader.py b/spins/invdes/problem_graph/log_tools/loader.py index ca86766..947cb5f 100644 --- a/spins/invdes/problem_graph/log_tools/loader.py +++ b/spins/invdes/problem_graph/log_tools/loader.py @@ -112,12 +112,11 @@ def load_all_logs(log_dir: str) -> Dict: Returns: List of the dictionaries contained in the optimization pickle files. """ - # Create a list of all the log files in the log directory. - logs_name_list = [] - for dir_file in os.listdir(log_dir): - if dir_file.endswith(LOG_EXTENSION): - logs_name_list.append(dir_file) - + logs_name_list = [ + dir_file + for dir_file in os.listdir(log_dir) + if dir_file.endswith(LOG_EXTENSION) + ] # Create a list of all the log dictionaries loaded from each log file. log_dict_list = [] for file_name in logs_name_list: @@ -289,12 +288,7 @@ def get_overlap_monitor_names(log_df: pd.DataFrame) -> List[str]: List of overlap monitor name strings. """ monitor_names = get_monitor_name_by_type(log_df) - overlap_names = [] - for name in monitor_names.scalars: - # Currently, all overlap monitors have an autogen name including "overlap". - if "overlap" in name.lower(): - overlap_names.append(name) - return overlap_names + return [name for name in monitor_names.scalars if "overlap" in name.lower()] def get_joined_scalar_monitors( @@ -361,8 +355,7 @@ def process_scalar(data: Union[float, List], elif scalar_operation.lower() == "imag": return np.imag(data) else: - raise ValueError( - "Unknown scalar operation, got {}".format(scalar_operation)) + raise ValueError(f"Unknown scalar operation, got {scalar_operation}") def process_field(field: List, diff --git a/spins/invdes/problem_graph/log_tools/monitor_spec.py b/spins/invdes/problem_graph/log_tools/monitor_spec.py index 59351b6..8324757 100644 --- a/spins/invdes/problem_graph/log_tools/monitor_spec.py +++ b/spins/invdes/problem_graph/log_tools/monitor_spec.py @@ -114,18 +114,18 @@ def convert_element_list(monitor_descriptions: List[MonitorDescription] if m.joiner_id in monitor_dict: # Check that all the other fields are the same for this joined monitor. 
m_joined = monitor_dict[m.joiner_id] - if not m.monitor_type == m_joined.monitor_type: - raise ValueError("Monitor type for " + m.monitor.name + - " is inconsistent for joiner id " + - m.joiner_id) - if not m.scalar_operation == m_joined.scalar_operation: - raise ValueError("Scalar operation for " + m.monitor.name + - " is inconsistent for joiner id " + - m.joiner_id) - if not m.vector_operation == m_joined.vector_operation: - raise ValueError("Vector operation for " + m.monitor.name + - " is inconsistent for joiner id " + - m.joiner_id) + if m.monitor_type != m_joined.monitor_type: + raise ValueError( + f"Monitor type for {m.monitor.name} is inconsistent for joiner id {m.joiner_id}" + ) + if m.scalar_operation != m_joined.scalar_operation: + raise ValueError( + f"Scalar operation for {m.monitor.name} is inconsistent for joiner id {m.joiner_id}" + ) + if m.vector_operation != m_joined.vector_operation: + raise ValueError( + f"Vector operation for {m.monitor.name} is inconsistent for joiner id {m.joiner_id}" + ) # All fields are consistent with the joined monitor fields, so add this monitor name. monitor_dict[m.joiner_id].monitor_names.append(m.monitor.name) diff --git a/spins/invdes/problem_graph/log_tools/plotter.py b/spins/invdes/problem_graph/log_tools/plotter.py index bd68ddb..ddb4491 100644 --- a/spins/invdes/problem_graph/log_tools/plotter.py +++ b/spins/invdes/problem_graph/log_tools/plotter.py @@ -72,11 +72,7 @@ def plot_scalar_monitors( log_df, monitor_description.monitor_names, event_name, scalar_op) # Choose axis object to plot on. - if same_plt: - axs = axes_list[0] - else: - axs = axes_list[plot_num] - + axs = axes_list[0] if same_plt else axes_list[plot_num] axs.plot( plot_data.iterations, plot_data.data, @@ -85,14 +81,13 @@ def plot_scalar_monitors( axs.axvline(x=change, color="k", linestyle="--") if same_plt: axs.legend() + elif plot_data.data.size: + axs.set_title("{name}:\nFinal value: {value:1.4E}".format( + name=monitor_description.joiner_id, + value=plot_data.data[-1])) else: - if not plot_data.data.size: - axs.set_title("{name}:\nNo iteration data found.".format( - name=monitor_description.joiner_id)) - else: - axs.set_title("{name}:\nFinal value: {value:1.4E}".format( - name=monitor_description.joiner_id, - value=plot_data.data[-1])) + axs.set_title("{name}:\nNo iteration data found.".format( + name=monitor_description.joiner_id)) axs.set_xlabel("Iteration") # Save generated figures to multipage pdf object. @@ -154,13 +149,13 @@ def plot_field_data( # Get the magnitude data. monitor_description = monitor_description_list.monitor_list[plt_ind] - field_dat = loader.get_single_monitor_data( + if field_dat := loader.get_single_monitor_data( log_df, monitor_description.monitor_names, transformation_name=transformation_name, iteration=iteration, - event_name=event_name) - if field_dat: + event_name=event_name, + ): field = loader.process_field( field_dat, vector_operation=monitor_description.vector_operation, @@ -170,8 +165,8 @@ def plot_field_data( # Make sure field is plottable as an image. if len(field.shape) != 2: raise ValueError( - "Plotted field data must be 2D, but {} has dimensions {}". 
- format(monitor_description.joiner_id, len(field.shape))) + f"Plotted field data must be 2D, but {monitor_description.joiner_id} has dimensions {len(field.shape)}" + ) im_plt = axs.imshow(field, origin="lower") axs.set_title(monitor_description.joiner_id) @@ -210,16 +205,14 @@ def _create_figs(num_plts: int, num_rows: int = 1, num_cols: int = 1) -> List: """ num_figs = math.ceil(num_plts / (num_rows * num_cols)) - extras = num_plts % (num_rows * num_cols) figs_list = [] - for fig_ind in range(num_figs): + for _ in range(num_figs): fig, _ = plt.subplots( num_rows, num_cols, figsize=(10, 6), tight_layout=True) figs_list.append(fig) - # Remove extra axes if necessary. - if extras: - for extras_ind in range((num_rows * num_cols) - extras): + if extras := num_plts % (num_rows * num_cols): + for _ in range((num_rows * num_cols) - extras): figs_list[-1].delaxes(figs_list[-1].axes[-1]) return figs_list diff --git a/spins/invdes/problem_graph/optplan/context.py b/spins/invdes/problem_graph/optplan/context.py index ddb4d2a..41c435b 100644 --- a/spins/invdes/problem_graph/optplan/context.py +++ b/spins/invdes/problem_graph/optplan/context.py @@ -118,8 +118,8 @@ def register_node_type(self, node_meta_type: str, node_type: str, node_map = self._optplan_node_map.get(node_meta_type, {}) if node_type in node_map: raise ValueError( - "Node type '{}' with metatype '{}' was registered twice.". - format(node_type, node_meta_type)) + f"Node type '{node_type}' with metatype '{node_meta_type}' was registered twice." + ) node_map[node_type] = (model, fun) self._optplan_node_map[node_meta_type] = node_map @@ -157,9 +157,7 @@ def pop(self) -> Optional[OptplanContext]: Returns: The context at the top, or `None` if stack is empty. """ - if self._stack: - return self._stack.pop() - return None + return self._stack.pop() if self._stack else None def peek(self) -> Optional[OptplanContext]: """Returns the context from the top of the stack without popping. @@ -167,9 +165,7 @@ def peek(self) -> Optional[OptplanContext]: Returns: The context at the top, or `None` is stack is emptpy. """ - if self._stack: - return self._stack[-1] - return None + return self._stack[-1] if self._stack else None def get_node_model(self, node_meta_type: str, node_type: str) -> Optional[models.Model]: @@ -187,8 +183,7 @@ def get_node_model(self, node_meta_type: str, model found. """ for context in reversed(self._stack): - model = context.get_node_model(node_meta_type, node_type) - if model: + if model := context.get_node_model(node_meta_type, node_type): return model return None @@ -209,8 +204,7 @@ def get_node_creator(self, node_meta_type: str, model found. 
""" for context in reversed(self._stack): - model = context.get_node_creator(node_meta_type, node_type) - if model: + if model := context.get_node_creator(node_meta_type, node_type): return model return None @@ -229,6 +223,6 @@ def get_node_model_dict(self, """ model_dict = {} for context in self._stack: - model_dict.update(context.get_node_model_dict(node_meta_type)) + model_dict |= context.get_node_model_dict(node_meta_type) return model_dict diff --git a/spins/invdes/problem_graph/optplan/io.py b/spins/invdes/problem_graph/optplan/io.py index 261dff3..8e93734 100644 --- a/spins/invdes/problem_graph/optplan/io.py +++ b/spins/invdes/problem_graph/optplan/io.py @@ -48,7 +48,7 @@ def generate_name(model_type: str) -> str: if model_type not in problem_graph_name_map: problem_graph_name_map[model_type] = 0 - name = "{}.{}".format(model_type, problem_graph_name_map[model_type]) + name = f"{model_type}.{problem_graph_name_map[model_type]}" problem_graph_name_map[model_type] += 1 @@ -152,18 +152,16 @@ def _iter_optplan_fields( # Wrap `process_field` so that returning `None` is same as returning the # child. def process_field_wrapped( - parent: models.Model, - child: Union[str, optplan.ProblemGraphNode], - field_type: optplan.ReferenceType, - ) -> optplan.ProblemGraphNode: + parent: models.Model, + child: Union[str, optplan.ProblemGraphNode], + field_type: optplan.ReferenceType, + ) -> optplan.ProblemGraphNode: if pass_field_info: return_val = process_field(parent, child, field_type) else: return_val = process_field(parent, child) - if return_val is None: - return child - return return_val + return child if return_val is None else return_val with warnings.catch_warnings(): warnings.simplefilter("ignore") @@ -238,8 +236,9 @@ def validate(plan: optplan.OptimizationPlan) -> None: node_name = node.name node.validate() except Exception as exc: - raise ValueError("Error encountered when validating node {}".format( - node_name)) from exc + raise ValueError( + f"Error encountered when validating node {node_name}" + ) from exc # Now validate the plan schema itself just in case we missed something # from the previous checks. 
@@ -249,7 +248,7 @@ def validate(plan: optplan.OptimizationPlan) -> None: names = set() for node in plan.nodes: if node.name in names: - raise ValueError("Nonunique name found: {}".format(node.name)) + raise ValueError(f"Nonunique name found: {node.name}") names.add(node.name) @@ -287,8 +286,7 @@ def _validate_optplan_version(version: str) -> None: """ version_parts = [int(part) for part in version.split(".")] if version_parts[0] < 0 or version_parts[1] < 2 or version_parts[2] < 3: - raise ValueError( - "Optplan must be at least version 0.2.1, got {}".format(version)) + raise ValueError(f"Optplan must be at least version 0.2.1, got {version}") def loads(serialized_plan: str) -> optplan.OptimizationPlan: diff --git a/spins/invdes/problem_graph/optplan/optplan.py b/spins/invdes/problem_graph/optplan/optplan.py index ebc1f3d..d96fbb9 100644 --- a/spins/invdes/problem_graph/optplan/optplan.py +++ b/spins/invdes/problem_graph/optplan/optplan.py @@ -68,16 +68,19 @@ def _convert(self, value, context): if model_class is None: raise ValueError( - "Unknown node, got node type '{}' with metatype '{}'".format( - value["type"], self._node_meta_type)) + f"""Unknown node, got node type '{value["type"]}' with metatype '{self._node_meta_type}'""" + ) return model_class(value, context=context) def _export(self, value, format, context): # pylint: disable=redefined-builtin - if (not value.__class__ in optplan.GLOBAL_CONTEXT_STACK. - get_node_model_dict(self._node_meta_type).values()): - raise ValueError("Cannot export model with type '{}'".format( - value.__class__)) + if ( + value.__class__ + not in optplan.GLOBAL_CONTEXT_STACK.get_node_model_dict( + self._node_meta_type + ).values() + ): + raise ValueError(f"Cannot export model with type '{value.__class__}'") return value.export(context=context) @@ -107,8 +110,8 @@ def __init__(self, *args, **kwargs) -> None: # Validate the name. if self.name.startswith("__"): raise ValueError( - "Name cannot start with two underscores (__), got {}".format( - self.name)) + f"Name cannot start with two underscores (__), got {self.name}" + ) # Verify that reference fields have been appropriately set. 
This is # actually a redundant check since `optplan.loads` and `optplan.dumps` @@ -135,8 +138,9 @@ def __init__(self, *args, **kwargs) -> None: elif isinstance(field_value, (str, field_type.reference_type)): continue - raise ValueError("Expected type {} for field {}, got {}".format( - field_type.reference_type, field_name, type(field_value))) + raise ValueError( + f"Expected type {field_type.reference_type} for field {field_name}, got {type(field_value)}" + ) class EmOverlap(ProblemGraphNode): @@ -176,7 +180,7 @@ def __add__(self, obj) -> "optplan.Sum": return optplan.Sum(functions=[self, optplan.make_constant(obj)]) if isinstance(obj, Function): return optplan.Sum(functions=[self, obj]) - raise TypeError("Attempting to add node with type {}".format(type(obj))) + raise TypeError(f"Attempting to add node with type {type(obj)}") def __mul__(self, obj) -> "optplan.Product": if isinstance(obj, optplan.Product): @@ -187,8 +191,7 @@ def __mul__(self, obj) -> "optplan.Product": return optplan.Product(functions=[self, optplan.make_constant(obj)]) if isinstance(obj, Function): return optplan.Product(functions=[self, obj]) - raise TypeError("Attempting to multiply node with type {}".format( - type(obj))) + raise TypeError(f"Attempting to multiply node with type {type(obj)}") def __pow__(self, obj) -> "optplan.Power": if isinstance(obj, numbers.Real): diff --git a/spins/invdes/problem_graph/optplan/schema_function.py b/spins/invdes/problem_graph/optplan/schema_function.py index 33947c1..2491490 100644 --- a/spins/invdes/problem_graph/optplan/schema_function.py +++ b/spins/invdes/problem_graph/optplan/schema_function.py @@ -27,8 +27,8 @@ def __add__(self, obj): if isinstance(obj, (numbers.Number, optplan.ComplexNumber)): return Sum(functions=self.functions + [make_constant(obj)]) raise TypeError( - "Attempting to add a node with type {} to type `Sum`.".format( - type(obj))) + f"Attempting to add a node with type {type(obj)} to type `Sum`." + ) @optplan.register_node_type() @@ -50,8 +50,8 @@ def __mul__(self, obj): if isinstance(obj, (numbers.Number, optplan.ComplexNumber)): return Product(functions=self.functions + [make_constant(obj)]) raise TypeError( - "Attempting to multiply a node with type {} to type `Product`.". - format(type(obj))) + f"Attempting to multiply a node with type {type(obj)} to type `Product`." 
+ ) @optplan.register_node_type() diff --git a/spins/invdes/problem_graph/optplan/test_context.py b/spins/invdes/problem_graph/optplan/test_context.py index 2df114e..16ae7c5 100644 --- a/spins/invdes/problem_graph/optplan/test_context.py +++ b/spins/invdes/problem_graph/optplan/test_context.py @@ -67,7 +67,7 @@ def test_optplan_context_register_node_same_name_throws_error(): def test_optplan_context_get_nonexistent_node(): ctx = optplan.OptplanContext() - assert ctx.get_node_model("testmeta", "dummy") == None + assert ctx.get_node_model("testmeta", "dummy") is None def test_optplan_context_get_node_model_dict(): @@ -97,8 +97,8 @@ def test_optplan_context_stack_get_node_model(): assert ctx_stack.get_node_model("testmeta", "dummy") == DummyModel assert ctx_stack.get_node_creator("testmeta", "dummy") == dummy_creator - assert ctx_stack.get_node_model("testmeta", "dummy2") == None - assert ctx_stack.get_node_creator("testmeta", "dummy2") == None + assert ctx_stack.get_node_model("testmeta", "dummy2") is None + assert ctx_stack.get_node_creator("testmeta", "dummy2") is None def test_optplan_context_stack_get_node_model_multiple(): @@ -114,11 +114,11 @@ def test_optplan_context_stack_get_node_model_multiple(): assert ctx_stack.get_node_model("testmeta", "dummy") == DummyModel assert ctx_stack.get_node_model("testmeta", "dummy2") == DummyModel2 - assert ctx_stack.get_node_model("testmeta", "dummy3") == None + assert ctx_stack.get_node_model("testmeta", "dummy3") is None assert ctx_stack.get_node_creator("testmeta", "dummy") == dummy_creator assert ctx_stack.get_node_creator("testmeta", "dummy2") == dummy_creator2 - assert ctx_stack.get_node_creator("testmeta", "dummy3") == None + assert ctx_stack.get_node_creator("testmeta", "dummy3") is None def test_optplan_context_stack_get_node_model_overwriting(): @@ -141,7 +141,7 @@ def test_optplan_context_stack_push_and_pop(): ctx2 = optplan.OptplanContext() ctx_stack = optplan.OptplanContextStack() - assert ctx_stack.peek() == None + assert ctx_stack.peek() is None ctx_stack.push(ctx) assert ctx_stack.peek() == ctx @@ -154,5 +154,5 @@ def test_optplan_context_stack_push_and_pop(): assert ctx_stack.peek() == ctx assert ctx_stack.pop() == ctx - assert ctx_stack.peek() == None - assert ctx_stack.pop() == None + assert ctx_stack.peek() is None + assert ctx_stack.pop() is None diff --git a/spins/invdes/problem_graph/schema_utils.py b/spins/invdes/problem_graph/schema_utils.py index a1023bf..e638784 100644 --- a/spins/invdes/problem_graph/schema_utils.py +++ b/spins/invdes/problem_graph/schema_utils.py @@ -176,11 +176,11 @@ def __init__(self, *args, **kwargs): with warnings.catch_warnings(): warnings.simplefilter("ignore") - init_kwargs = {} - for key, value in kwargs.items(): - if key not in self._schema.fields.keys(): - init_kwargs[key] = value - + init_kwargs = { + key: value + for key, value in kwargs.items() + if key not in self._schema.fields.keys() + } super().__init__(*args, **init_kwargs) for key, item in kwargs.items(): if key in self._schema.fields.keys(): diff --git a/spins/invdes/problem_graph/simspace.py b/spins/invdes/problem_graph/simspace.py index 6ef9024..dfdc165 100644 --- a/spins/invdes/problem_graph/simspace.py +++ b/spins/invdes/problem_graph/simspace.py @@ -140,8 +140,8 @@ def __call__(self, wlen: float) -> SimulationSpaceInstance: )) else: raise NotImplementedError( - "Selection matrix type {} not yet implemented".format( - self._selmat_type)) + f"Selection matrix type {self._selmat_type} not yet implemented" + ) self._cache[wlen] = 
SimulationSpaceInstance( eps_bg=eps_bg, selection_matrix=selection_mat) @@ -185,12 +185,10 @@ def _create_edge_coords(sim_region: optplan.Box3d, xyz_min = np.array(sim_region.center) - np.array(sim_region.extents) / 2 xyz_max = np.array(sim_region.center) + np.array(sim_region.extents) / 2 - edge_coords = [] - for i in range(3): - edge_coords.append( - np.arange(xyz_min[i] - dx / 2, xyz_max[i] + dx / 2, dx)) - - return edge_coords + return [ + np.arange(xyz_min[i] - dx / 2, xyz_max[i] + dx / 2, dx) + for i in range(3) + ] def _create_grid(eps_spec: optplan.EpsilonSpec, @@ -233,7 +231,8 @@ def _create_grid(eps_spec: optplan.EpsilonSpec, grid.render() else: raise NotImplementedError( - "Epsilon spec not implemented for type {}".format(eps_spec.type)) + f"Epsilon spec not implemented for type {eps_spec.type}" + ) # Return epsilon and dxes. return grid @@ -281,7 +280,7 @@ def _draw_mesh_on_grid(mesh: optplan.Mesh, thickness=thickness, eps=eps_mat) else: - raise ValueError("Encountered unknown mesh type: {}".format(mesh.type)) + raise ValueError(f"Encountered unknown mesh type: {mesh.type}") def _draw_gds_on_grid(gds_stack: List[optplan.GdsMaterialStackLayer], @@ -368,13 +367,11 @@ def _get_mat_index(index_element: optplan.Material, # Check if full path already given. if os.path.isfile(fname): csv_file = fname - # Look for csv file in the material/csv_files directory. else: path_dir = os.path.dirname(inspect.getfile(material)) csv_file = os.path.join(path_dir, "csv_files", fname) if not os.path.isfile(csv_file): - raise ValueError( - "No csv file named %s or %s found." % (fname, csv_file)) + raise ValueError(f"No csv file named {fname} or {csv_file} found.") index_data = pd.read_csv(csv_file, header=0) if "wl" not in index_data.columns: raise ValueError( diff --git a/spins/invdes/problem_graph/solver.py b/spins/invdes/problem_graph/solver.py index e4ed8cc..37dc2e3 100644 --- a/spins/invdes/problem_graph/solver.py +++ b/spins/invdes/problem_graph/solver.py @@ -116,8 +116,8 @@ def restore_workspace(plan: optplan.OptimizationPlan, work: workspace.Workspace, # `transform_index - 1` transformation. for i, transform in enumerate(plan.transformations): if os.path.exists( - os.path.join(save_folder, - "{}.chkpt.pkl".format(transform.name))): + os.path.join(save_folder, f"{transform.name}.chkpt.pkl") + ): transform_index = i + 1 # Load the checkpoint data. @@ -127,9 +127,10 @@ def restore_workspace(plan: optplan.OptimizationPlan, work: workspace.Workspace, # parameters. 
if transform_index > 0: chkpt_file = os.path.join( - save_folder, "{}.chkpt.pkl".format( - plan.transformations[transform_index - 1].name)) - console_logger.info("Restoring from checkpoint {}".format(chkpt_file)) + save_folder, + f"{plan.transformations[transform_index - 1].name}.chkpt.pkl", + ) + console_logger.info(f"Restoring from checkpoint {chkpt_file}") with open(chkpt_file, "rb") as fp: chkpt_data = pickle.load(fp) @@ -165,7 +166,7 @@ def restore_workspace(plan: optplan.OptimizationPlan, work: workspace.Workspace, if not log_file: return transform_index, None - console_logger.info("Restoring from log {}".format(log_file)) + console_logger.info(f"Restoring from log {log_file}") with open(log_file, "rb") as fp: log_data = pickle.load(fp) diff --git a/spins/invdes/problem_graph/test_optplan.py b/spins/invdes/problem_graph/test_optplan.py index df8050f..6bfca1e 100644 --- a/spins/invdes/problem_graph/test_optplan.py +++ b/spins/invdes/problem_graph/test_optplan.py @@ -111,19 +111,21 @@ def build_objective(simspace: plan.SimulationSpace, opt_stage: str): sim_mons.append( plan.FieldMonitor( function=sim, - name=opt_stage + "_" + name + "_mon", + name=f"{opt_stage}_{name}_mon", center=[0, 0, 0], normal=[0, 0, 1], - )) + ) + ) eps_mons = [] for eps, name in zip([eps1550, eps1300], ["eps1550", "eps1300"]): eps_mons.append( plan.FieldMonitor( function=eps, - name=opt_stage + "_" + name + "_mon", + name=f"{opt_stage}_{name}_mon", center=[0, 0, 0], normal=[0, 0, 1], - )) + ) + ) def powercomp(overlap: plan.Overlap, target: float): return plan.PowerComp( @@ -144,7 +146,9 @@ def powercomp(overlap: plan.Overlap, target: float): ["sim1550_top", "sim1550_bot", "sim1300_top", "sim1300_bot"]): overlap_mons.append( plan.SimpleMonitor( - name=opt_stage + "_" + name + "_mon", function=overlap)) + name=f"{opt_stage}_{name}_mon", function=overlap + ) + ) return (plan.Sum(functions=[ powercomp(over, val) for over, val in zip(overlaps, [1, 0, 0, 1]) diff --git a/spins/invdes/problem_graph/workspace.py b/spins/invdes/problem_graph/workspace.py index 989b91b..8c90cb4 100644 --- a/spins/invdes/problem_graph/workspace.py +++ b/spins/invdes/problem_graph/workspace.py @@ -51,10 +51,7 @@ def __init__(self, # the graph. self._nodes = {} # List of all the objects that have been instantiated. - self._objects = {} - - # TODO(logansu): Accept argument-less variable. - self._objects[VARIABLE_NODE] = problem.Variable(1) + self._objects = {VARIABLE_NODE: problem.Variable(1)} if isinstance(nodes, collections.Iterable): for node in nodes: @@ -92,7 +89,7 @@ def process_field( add_node(child) if add_node(node): - visited = set(id(node_) for node_ in self._nodes) + visited = {id(node_) for node_ in self._nodes} # TODO(logansu): Deal with this private import. 
from spins.invdes.problem_graph.optplan.io import _iter_optplan_fields _iter_optplan_fields(node, visited, process_field) @@ -120,8 +117,9 @@ def run(self, creator = optplan.GLOBAL_CONTEXT_STACK.get_node_creator( optplan.NodeMetaType.TRANSFORMATION, node.transformation.type) if creator is None: - raise ValueError("Unable to find creator for transformation with " - "type {}".format(node.type)) + raise ValueError( + f"Unable to find creator for transformation with type {node.type}" + ) if self._logger: self._logger.set_transformation_name(node.name) @@ -174,12 +172,9 @@ def get_object(self, node = name_or_node else: raise ValueError( - "`name_or_node` must be string or `ProblemGraphNode`" - ", got {}".format(type(name_or_node))) + f"`name_or_node` must be string or `ProblemGraphNode`, got {type(name_or_node)}" + ) - # Return the cached object. Note that we do this before `_add_node` - # just in case someone decided to modify `node` after adding it to - # the workspace. if node.name in self._objects: if return_graph_node: return self._objects[node.name], self._nodes[node.name] @@ -192,11 +187,7 @@ def get_object(self, # Create the actual object. creator = optplan.GLOBAL_CONTEXT_STACK.get_node_creator( optplan.NodeMetaType.OPTPLAN_NODE, node.type) - if creator is None: - self._objects[node.name] = node - else: - self._objects[node.name] = creator(node, self) - + self._objects[node.name] = node if creator is None else creator(node, self) if return_graph_node: return self._objects[node.name], self._nodes[node.name] else: @@ -218,12 +209,11 @@ def get_objects_by_type( corresponding to the objects. If `return_graph_node` is `True`, returns `Dict[str, Tuple[object, optplan.ProblemGraphNode]]`. """ - objects = {} - for node_name, node in self._nodes.items(): - if isinstance(node, model_type): - objects[node.name] = self.get_object(node, return_graph_node) - - return objects + return { + node.name: self.get_object(node, return_graph_node) + for node_name, node in self._nodes.items() + if isinstance(node, model_type) + } def _set_parameters(work: workspace.Workspace, @@ -298,25 +288,22 @@ def write_checkpoint(self, filename: str) -> None: Args: filename: Name of the checkpoint file. """ - # Get workspace parameters. - parameter_data = {} parameter_list = self._work.get_objects_by_type(optplan.Parameter) - for param_name, param_obj in parameter_list.items(): - parameter_data[param_name] = param_obj.value - - # Get parametrizations. - parametrization_data = {} + parameter_data = { + param_name: param_obj.value + for param_name, param_obj in parameter_list.items() + } param_list = self._work.get_objects_by_type(optplan.Parametrization) - for name, obj in param_list.items(): - parametrization_data[name] = obj.serialize() - + parametrization_data = { + name: obj.serialize() for name, obj in param_list.items() + } data = { "time": str(datetime.now()), "parameters": parameter_data, "parametrizations": parametrization_data, } - checkpoint_file = filename + ".chkpt.pkl" + checkpoint_file = f"{filename}.chkpt.pkl" self._logger.info("Saving checkpoint file: %s", checkpoint_file) # Save the data. @@ -346,13 +333,11 @@ def write(self, for mon, mon_val in zip(monitor_list, mon_vals): monitor_data[mon.name] = mon_val - # Get workspace parameters. 
-        parameter_data = {}
         parameter_list = self._work.get_objects_by_type(optplan.Parameter)
-        for param_name, param_obj in parameter_list.items():
-            parameter_data[param_name] = param_obj.calculate_objective_function(
-                param)
-
+        parameter_data = {
+            param_name: param_obj.calculate_objective_function(param)
+            for param_name, param_obj in parameter_list.items()
+        }
         # Make a log entry.
         data = {
             "transformation": self._transform_name,
@@ -370,7 +355,8 @@ def write(self,
 
         # Save the data.
         file_path = os.path.join(
-            self._path, os.path.join("step{}.pkl".format(self._log_counter)))
+            self._path, os.path.join(f"step{self._log_counter}.pkl")
+        )
 
         with open(file_path, "wb") as handle:
             pickle.dump(data, handle)
@@ -393,9 +379,8 @@ def get_latest_log_step(folder: str) -> int:
     # Keep track of the file with the largest step.
     max_step = 0
     for name in filenames:
-        match = re.search(r"step(?P<step>\d+)\.pkl$", name)
-        if match:
-            max_step = max(max_step, int(match.group("step")))
+        if match := re.search(r"step(?P<step>\d+)\.pkl$", name):
+            max_step = max(max_step, int(match["step"]))
    return max_step
 
 
@@ -412,6 +397,4 @@ def get_latest_log_file(folder: str) -> Optional[str]:
         Filename of the last log step if it exists. Else `None`.
     """
     max_step = get_latest_log_step(folder)
-    if max_step == 0:
-        return None
-    return os.path.join(folder, "step{}.pkl".format(max_step))
+    return None if max_step == 0 else os.path.join(folder, f"step{max_step}.pkl")
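The named group `(?P<step>\d+)` is what `match["step"]` indexes (`match[...]` has been an alias for `match.group(...)` since Python 3.6, and the walrus form needs 3.8+). A self-contained sketch of the same latest-step scan, with hypothetical file names:

import re

def latest_step(filenames):
    """Return the largest N among names matching 'stepN.pkl', or 0 if none."""
    max_step = 0
    for name in filenames:
        # Walrus binds the match object and tests it for None in one step.
        if match := re.search(r"step(?P<step>\d+)\.pkl$", name):
            max_step = max(max_step, int(match["step"]))
    return max_step

assert latest_step(["step1.pkl", "step12.pkl", "notes.txt"]) == 12
assert latest_step([]) == 0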