@@ -538,3 +538,185 @@ def generate_training_set(num_samples, para_dim):
print(f"Projection error (sigma): {projection_error_array_sigma}")
print(f"Projection error (u): {projection_error_array_u}")
+
+ # ### Projection error ends ###
+
+ # Creating dataset
+ def generate_ann_input_set(num_ann_samples=10):
+     # Parameter bounds: ((-2.5, -1.5), (0., 1.), (0.2, 0.8), (0.2, 0.8), (2.5, 3.5))
+     xlimits = np.array([[-2.5, -1.5], [0., 1.],
+                         [0.2, 0.8], [0.2, 0.8],
+                         [2.5, 3.5]])
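+     # NOTE: LHS is assumed to be smt.sampling_methods.LHS (Latin hypercube
+     # sampling over the five-dimensional parameter domain).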
+     sampling = LHS(xlimits=xlimits)
+     training_set = sampling(num_ann_samples)
+     return training_set
+
+ def generate_ann_output_set(problem, reduced_problem, input_set, mode=None):
+     output_set_sigma = np.zeros([input_set.shape[0], len(reduced_problem._basis_functions_sigma)])
+     output_set_u = np.zeros([input_set.shape[0], len(reduced_problem._basis_functions_u)])
+     for i in range(input_set.shape[0]):
+         if mode is None:
+             print(f"Parameter number {i + 1} of {input_set.shape[0]}: {input_set[i, :]}")
+         else:
+             print(f"{mode} parameter number {i + 1} of {input_set.shape[0]}: {input_set[i, :]}")
+         solution_sigma, solution_u = problem.solve(input_set[i, :])
+         output_set_sigma[i, :] = reduced_problem.project_snapshot_sigma(solution_sigma, len(reduced_problem._basis_functions_sigma)).array
+         output_set_u[i, :] = reduced_problem.project_snapshot_u(solution_u, len(reduced_problem._basis_functions_u)).array
+     return output_set_sigma, output_set_u
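+ # NOTE: each full-order solve above is projected onto the reduced basis, so
+ # the ANN targets are reduced-basis coefficients rather than full-order DoFs.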
+
+ ann_input_set = generate_ann_input_set(num_ann_samples=ann_input_samples_num)
+ np.random.shuffle(ann_input_set)
+ ann_output_set_sigma, ann_output_set_u = \
+     generate_ann_output_set(problem_parametric, reduced_problem,
+                             ann_input_set, mode="Training")
+
+ num_training_samples = int(0.7 * ann_input_set.shape[0])
+ num_validation_samples = ann_input_set.shape[0] - num_training_samples
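+ # 70/30 train-validation split; the shuffle above randomises which LHS
+ # samples land in each subset.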
+
+ input_training_set = ann_input_set[:num_training_samples, :]
+ output_training_set_sigma = ann_output_set_sigma[:num_training_samples, :]
+ output_training_set_u = ann_output_set_u[:num_training_samples, :]
+
+ input_validation_set = ann_input_set[num_training_samples:, :]
+ output_validation_set_sigma = ann_output_set_sigma[num_training_samples:, :]
+ output_validation_set_u = ann_output_set_u[num_training_samples:, :]
+
+ reduced_problem.output_range_sigma[0] = np.min(ann_output_set_sigma)
+ reduced_problem.output_range_sigma[1] = np.max(ann_output_set_sigma)
+ reduced_problem.output_range_u[0] = np.min(ann_output_set_u)
+ reduced_problem.output_range_u[1] = np.max(ann_output_set_u)
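+ # The output scaling range is the global min/max over the full output set
+ # (training and validation together), so both subsets are normalised
+ # consistently.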
+
+ customDataset = CustomDataset(reduced_problem, input_training_set,
+                               output_training_set_sigma,
+                               input_scaling_range=reduced_problem.input_scaling_range,
+                               output_scaling_range=reduced_problem.output_scaling_range_sigma,
+                               input_range=reduced_problem.input_range,
+                               output_range=reduced_problem.output_range_sigma, verbose=False)
+ train_dataloader_sigma = DataLoader(customDataset, batch_size=6, shuffle=False)  # shuffle=True
+
+ customDataset = CustomDataset(reduced_problem, input_validation_set,
+                               output_validation_set_sigma,
+                               input_scaling_range=reduced_problem.input_scaling_range,
+                               output_scaling_range=reduced_problem.output_scaling_range_sigma,
+                               input_range=reduced_problem.input_range,
+                               output_range=reduced_problem.output_range_sigma, verbose=False)
+ valid_dataloader_sigma = DataLoader(customDataset, shuffle=False)
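+ # The validation loaders rely on DataLoader's default batch_size=1; only the
+ # training loaders batch samples. shuffle=False keeps runs reproducible (the
+ # commented-out shuffle=True restores random batching).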
604
+
605
+ customDataset = \
606
+ CustomDataset (reduced_problem , input_training_set ,
607
+ output_training_set_u ,
608
+ input_scaling_range = [- 1. , 1. ],
609
+ output_scaling_range = reduced_problem .output_scaling_range_u ,
610
+ input_range = reduced_problem .input_range ,
611
+ output_range = reduced_problem .output_range_u , verbose = False )
612
+ train_dataloader_u = DataLoader (customDataset , batch_size = 6 , shuffle = False ) # shuffle=True)
613
+
614
+ customDataset = \
615
+ CustomDataset (reduced_problem , input_validation_set ,
616
+ output_validation_set_u ,
617
+ input_scaling_range = [- 1. , 1. ],
618
+ output_scaling_range = reduced_problem .output_scaling_range_u ,
619
+ input_range = reduced_problem .input_range ,
620
+ output_range = reduced_problem .output_range_u , verbose = False )
621
+ valid_dataloader_u = DataLoader (customDataset , shuffle = False )
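+ # NOTE: the u-datasets hardcode input_scaling_range=[-1., 1.] where the
+ # sigma-datasets use reduced_problem.input_scaling_range; the values are
+ # presumably the same, but the asymmetry looks unintentional.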
622
+
623
+ # ANN model
624
+ model_sigma = HiddenLayersNet (input_training_set .shape [1 ], [55 , 55 , 55 ],
625
+ len (reduced_problem ._basis_functions_sigma ),
626
+ Tanh ())
627
+
628
+ model_u = HiddenLayersNet (input_training_set .shape [1 ], [55 , 55 , 55 ],
629
+ len (reduced_problem ._basis_functions_u ),
630
+ Tanh ())
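+ # Both networks map the 5 input parameters through three hidden layers of 55
+ # neurons with Tanh activations to the respective number of RB coefficients.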
631
+
632
+ path = "model_sigma.pth"
633
+ save_model (model_sigma , path )
634
+ # load_model(model_sigma, path)
635
+
636
+ training_loss_sigma = list ()
637
+ validation_loss_sigma = list ()
638
+
639
+ max_epochs_sigma = 50 # 20000
640
+ min_validation_loss_sigma = None
641
+ start_epoch_sigma = 0
642
+ checkpoint_path_sigma = "checkpoint_sigma"
643
+ checkpoint_epoch_sigma = 10
644
+
645
+ learning_rate_sigma = 5.e-6
646
+ optimiser_sigma = get_optimiser (model_sigma , "Adam" , learning_rate_sigma )
647
+ loss_fn_sigma = get_loss_func ("MSE" , reduction = "sum" )
648
+
649
+ if os .path .exists (checkpoint_path_sigma ):
650
+ start_epoch_sigma , min_validation_loss_sigma = \
651
+ load_checkpoint (checkpoint_path_sigma , model_sigma , optimiser_sigma )
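+ # Resume from a saved checkpoint when one exists; otherwise training starts
+ # at epoch 0 with no recorded minimum validation loss.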
652
+
653
+ import time
654
+ start_time = time .time ()
655
+ for epochs in range (start_epoch_sigma , max_epochs_sigma ):
656
+ if epochs > 0 and epochs % checkpoint_epoch_sigma == 0 :
657
+ save_checkpoint (checkpoint_path_sigma , epochs , model_sigma ,
658
+ optimiser_sigma , min_validation_loss_sigma )
659
+ print (f"Epoch: { epochs + 1 } /{ max_epochs_sigma } " )
660
+ current_training_loss = train_nn (reduced_problem , train_dataloader_sigma ,
661
+ model_sigma , loss_fn_sigma ,
662
+ optimiser_sigma )
663
+ training_loss_sigma .append (current_training_loss )
664
+ current_validation_loss = validate_nn (reduced_problem , valid_dataloader_sigma ,
665
+ model_sigma , loss_fn_sigma )
666
+ validation_loss_sigma .append (current_validation_loss )
667
+ if epochs > 0 and current_validation_loss > 1.01 * min_validation_loss_sigma :
668
+ # 1% safety margin against min_validation_loss
669
+ # before invoking early stopping criteria
670
+ print (f"Early stopping criteria invoked at epoch: { epochs + 1 } " )
671
+ break
672
+ min_validation_loss_sigma = min (validation_loss_sigma )
673
+ end_time = time .time ()
674
+ elapsed_time = end_time - start_time
675
+ os .system (f"rm { checkpoint_path_sigma } " )
676
+ print (f"Training time (sigma): { elapsed_time } " )
677
+
678
+ path = "model_u.pth"
679
+ save_model (model_u , path )
680
+ # load_model(model_u, path)
681
+
682
+ training_loss_u = list ()
683
+ validation_loss_u = list ()
684
+
685
+ max_epochs_u = 50 # 20000
686
+ min_validation_loss_u = None
687
+ start_epoch_u = 0
688
+ checkpoint_path_u = "checkpoint_u"
689
+ checkpoint_epoch_u = 10
690
+
691
+ learning_rate_u = 5.e-6
692
+ optimiser_u = get_optimiser (model_u , "Adam" , learning_rate_u )
693
+ loss_fn_u = get_loss_func ("MSE" , reduction = "sum" )
694
+
695
+ if os .path .exists (checkpoint_path_u ):
696
+ start_epoch_u , min_validation_loss_u = \
697
+ load_checkpoint (checkpoint_path_u , model_u , optimiser_u )
698
+
699
+ import time
700
+ start_time = time .time ()
701
+ for epochs in range (start_epoch_u , max_epochs_u ):
702
+ if epochs > 0 and epochs % checkpoint_epoch_u == 0 :
703
+ save_checkpoint (checkpoint_path_u , epochs , model_u ,
704
+ optimiser_u , min_validation_loss_u )
705
+ print (f"Epoch: { epochs + 1 } /{ max_epochs_u } " )
706
+ current_training_loss = train_nn (reduced_problem , train_dataloader_u ,
707
+ model_u , loss_fn_u ,
708
+ optimiser_u )
709
+ training_loss_u .append (current_training_loss )
710
+ current_validation_loss = validate_nn (reduced_problem , valid_dataloader_u ,
711
+ model_u , loss_fn_u )
712
+ validation_loss_u .append (current_validation_loss )
713
+ if epochs > 0 and current_validation_loss > 1.01 * min_validation_loss_u :
714
+ # 1% safety margin against min_validation_loss
715
+ # before invoking early stopping criteria
716
+ print (f"Early stopping criteria invoked at epoch: { epochs + 1 } " )
717
+ break
718
+ min_validation_loss_u = min (validation_loss_u )
719
+ end_time = time .time ()
720
+ elapsed_time = end_time - start_time
721
+ os .system (f"rm { checkpoint_path_u } " )
722
+ print (f"Training time (u): { elapsed_time } " )