diff --git a/src/pylife/stress/rainflow/fkm_nonlinear.py b/src/pylife/stress/rainflow/fkm_nonlinear.py
index 4f000fb8..06185b90 100644
--- a/src/pylife/stress/rainflow/fkm_nonlinear.py
+++ b/src/pylife/stress/rainflow/fkm_nonlinear.py
@@ -543,17 +543,17 @@ def _adjust_samples_and_flush_for_hcm_first_run(self, samples):
         if not is_multi_index:
             samples = np.concatenate([[0], np.asarray(samples)])
         else:
-            # get the index with all node_id`s
-            node_id_index = samples.groupby("node_id").first().index
+            assessment_levels = [name for name in samples.index.names if name != "load_step"]
+            assessment_idx = samples.groupby(assessment_levels).first().index
 
             # create a new sample with 0 load for all nodes
-            multi_index = pd.MultiIndex.from_product([[0], node_id_index], names=["load_step","node_id"])
+            multi_index = pd.MultiIndex.from_product([[0], assessment_idx], names=samples.index.names)
             first_sample = pd.Series(0, index=multi_index)
 
             # increase the load_step index value by one for all samples
             samples_without_index = samples.reset_index()
             samples_without_index.load_step += 1
-            samples = samples_without_index.set_index(["load_step", "node_id"])[0]
+            samples = samples_without_index.set_index(samples.index.names)[0]
 
             # prepend the new zero load sample
             samples = pd.concat([first_sample, samples], axis=0)
@@ -568,9 +568,7 @@ def _adjust_samples_and_flush_for_hcm_first_run(self, samples):
         scalar_samples_twice = np.concatenate([scalar_samples, scalar_samples])
         turn_indices, _ = RFG.find_turns(scalar_samples_twice)
 
-        flush = True
-        if len(scalar_samples)-1 not in turn_indices:
-            flush = False
+        flush = len(scalar_samples)-1 in turn_indices
 
         return samples, flush
 
diff --git a/src/pylife/stress/rainflow/recorders.py b/src/pylife/stress/rainflow/recorders.py
index d77f0270..3039e161 100644
--- a/src/pylife/stress/rainflow/recorders.py
+++ b/src/pylife/stress/rainflow/recorders.py
@@ -288,11 +288,12 @@ def collective(self):
         """
 
         if len(self.S_min) > 0 and len(self.S_min.index.names) > 1:
-            n_nodes = self.S_min.groupby('node_id').first().count()
-            n_hystereses = int(len(self.S_min) / n_nodes)
+            assessment_levels = [name for name in self.S_min.index.names if name != "load_step"]
+            n_assessment_points = self.S_min.groupby(assessment_levels).first().count()
+            n_hystereses = int(len(self.S_min) / n_assessment_points)
 
             index = pd.MultiIndex.from_product(
-                [range(n_hystereses), range(n_nodes)],
+                [range(n_hystereses), range(n_assessment_points)],
                 names=["hysteresis_index", "assessment_point_index"],
             )
         else:
diff --git a/tests/stress/rainflow/test_fkm_nonlinear.py b/tests/stress/rainflow/test_fkm_nonlinear.py
index 5ca10722..0d9f2b88 100644
--- a/tests/stress/rainflow/test_fkm_nonlinear.py
+++ b/tests/stress/rainflow/test_fkm_nonlinear.py
@@ -817,6 +817,31 @@ def test_hcm_first_second(vals, num):
     assert multi_collective.to_json(indent=4) == reference
 
 
+def test_element_id():
+    vals = np.array([100., -200., 100., -250., 200., 0., 200., -200.]) * (250+6.6)/250 * 1.4
+    signal = pd.DataFrame({11: vals, 12: 2*vals, 13: 3*vals, 'load_step': range(len(vals))}).set_index('load_step').stack()
+    signal.index.names = ['load_step', 'element_id']
+
+    E = 206e3    # [MPa] Young's modulus
+    K = 3.1148*(1251)**0.897 / ((np.min([0.338, 1033.*1251.**(-1.235)]))**0.187)
+    # K = 2650.5  # [MPa]
+    n = 0.187    # [-]
+    K_p = 3.5    # [-] limit load factor (de: Traglastformzahl), K_p = F_plastic / F_yield (3.1.1)
+
+    extended_neuber = NAL.ExtendedNeuber(E, K, n, K_p)
+
+    detector = FKMNonlinearDetector(
+        recorder=RFR.FKMNonlinearRecorder(),
+        notch_approximation_law=extended_neuber
+    )
+    detector.process_hcm_first(signal).process_hcm_second(signal)
+    with open('tests/stress/rainflow/reference-fkm-nonlinear/reference_first_second-7.json') as f:
+        reference = f.read()
+
+    assert detector.recorder.collective.to_json(indent=4) == reference
+    assert detector.recorder.loads_max.index.names == ["load_step", "element_id"]
+
+
 @pytest.fixture
 def detector_seeger_beste():
     E = 206e3    # [MPa] Young's modulus
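
Note on the generalization above: the detector and recorder previously hardcoded "node_id" as the only supported spatial index level; both now derive the assessment-point levels from whatever index names the samples carry, so a signal indexed by "element_id" (as in the new test) passes through unchanged. Below is a minimal sketch of the indexing pattern in isolation, assuming only numpy and pandas; the series construction mirrors the new test, while `shifted` and `result` are hypothetical names used purely for illustration:

    import numpy as np
    import pandas as pd

    # load signal for two assessment points, indexed (load_step, element_id)
    vals = np.array([100., -200., 100., -250.])
    signal = (
        pd.DataFrame({11: vals, 12: 2 * vals, 'load_step': range(len(vals))})
        .set_index('load_step')
        .stack()
    )
    signal.index.names = ['load_step', 'element_id']

    # every index level except "load_step" identifies an assessment point
    assessment_levels = [name for name in signal.index.names if name != 'load_step']
    assessment_idx = signal.groupby(assessment_levels).first().index

    # prepend a zero load for every assessment point, as the first hunk
    # does before the HCM first run
    multi_index = pd.MultiIndex.from_product([[0], assessment_idx], names=signal.index.names)
    first_sample = pd.Series(0, index=multi_index)

    # shift the existing load steps up by one and prepend the zero sample
    shifted = signal.reset_index()
    shifted.load_step += 1
    shifted = shifted.set_index(signal.index.names)[0]
    result = pd.concat([first_sample, shifted], axis=0)

Selecting the levels by name rather than assuming a fixed ("load_step", "node_id") layout keeps the role of "load_step" fixed while letting any remaining levels define the assessment point, which is also what lets the recorder count assessment points without knowing what they are called.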