@@ -247,13 +247,13 @@ def process_generate_categorical_fim(
247247 # STAGE-BASED
248248 if is_stage_based :
249249 # Generate Stage-Based CatFIM mapping
250- # does flows and inundation (mapping)
250+ # does flows and inundation (mapping)
251251
252252 catfim_sites_file_path = os .path .join (output_mapping_dir , 'stage_based_catfim_sites.gpkg' )
253253
254254 if step_num <= 1 :
255255
256- df_restricted_sites = load_restricted_sites ()
256+ df_restricted_sites = load_restricted_sites (is_stage_based )
257257
258258 generate_stage_based_categorical_fim (
259259 output_catfim_dir ,
@@ -300,6 +300,9 @@ def process_generate_categorical_fim(
300300 job_flows = job_number_huc * job_number_inundate
301301
302302 if step_num <= 1 :
303+
304+ df_restricted_sites = load_restricted_sites (is_stage_based )
305+
303306 generate_flows (
304307 output_catfim_dir ,
305308 nwm_us_search ,
@@ -310,6 +313,7 @@ def process_generate_categorical_fim(
310313 valid_ahps_hucs ,
311314 nwm_metafile ,
312315 FLOG .LOG_FILE_PATH ,
316+ df_restricted_sites ,
313317 )
314318 end = time .time ()
315319 elapsed_time = (end - start ) / 60
@@ -1167,23 +1171,31 @@ def __calc_stage_intervals(non_rec_stage_values_df, past_major_interval_cap, huc
11671171 return interval_recs
11681172
11691173
1170- def load_restricted_sites ():
1174+ def load_restricted_sites (is_stage_based ):
11711175 """
1172- At this point, only stage based uses this. But a arg of "catfim_type (stage or flow) or something
1173- can be added later.
1176+ Previously, only stage-based used this. It is now being used by stage-based and flow-based (1/24/25)
1177+
1178+ The 'catfim_type' column can have three different values: 'stage', 'flow', and 'both'. This determines
1179+ whether the site should be filtered out for stage-based CatFIM, flow-based CatFIM, or both of them.
11741180
11751181 Returns: a dataframe for the restricted lid and the reason why:
1176- " nws_lid", " restricted_reason"
1182+ ' nws_lid', ' restricted_reason', 'catfim_type'
11771183 """
11781184
1179- file_name = "stage_based_ahps_restricted_sites .csv"
1185+ file_name = "ahps_restricted_sites .csv"
11801186 current_script_folder = os .path .dirname (__file__ )
11811187 file_path = os .path .join (current_script_folder , file_name )
11821188
11831189 df_restricted_sites = pd .read_csv (file_path , dtype = str )
11841190
11851191 df_restricted_sites ['nws_lid' ].fillna ("" , inplace = True )
11861192 df_restricted_sites ['restricted_reason' ].fillna ("" , inplace = True )
1193+ df_restricted_sites ['catfim_type' ].fillna ("" , inplace = True )
1194+
1195+ # remove extra empty spaces on either side of all cells
1196+ df_restricted_sites ['nws_lid' ] = df_restricted_sites ['nws_lid' ].str .strip ()
1197+ df_restricted_sites ['restricted_reason' ] = df_restricted_sites ['restricted_reason' ].str .strip ()
1198+ df_restricted_sites ['catfim_type' ] = df_restricted_sites ['catfim_type' ].str .strip ()
11871199
11881200 # Need to drop the comment lines before doing any more processing
11891201 df_restricted_sites .drop (
@@ -1195,11 +1207,13 @@ def load_restricted_sites():
11951207 # There are enough conditions and a low number of rows that it is easier to
11961208 # test / change them via a for loop
11971209 indexs_for_recs_to_be_removed_from_list = []
1210+
1211+ # Clean up dataframe
11981212 for ind , row in df_restricted_sites .iterrows ():
11991213 nws_lid = row ['nws_lid' ]
12001214 restricted_reason = row ['restricted_reason' ]
12011215
1202- if len (nws_lid ) != 5 : # could be just a blank row in the
1216+ if len (nws_lid ) != 5 : # Invalid row, could be just a blank row in the file
12031217 FLOG .warning (
12041218 f"From the ahps_restricted_sites list, an invalid nws_lid value of '{ nws_lid } '"
12051219 " and has dropped from processing"
@@ -1213,14 +1227,22 @@ def load_restricted_sites():
12131227 df_restricted_sites .at [ind , 'restricted_reason' ] = restricted_reason
12141228 FLOG .warning (f"{ restricted_reason } . Lid is '{ nws_lid } '" )
12151229 continue
1216- # end for
1230+ # end loop
12171231
1218- # Invalid records (not dropping, just completely invalid recs from the csv)
1232+ # Invalid records in CSV (not dropping, just completely invalid recs from the csv)
12191233 # Could be just blank rows from the csv
12201234 if len (indexs_for_recs_to_be_removed_from_list ) > 0 :
12211235 df_restricted_sites = df_restricted_sites .drop (indexs_for_recs_to_be_removed_from_list ).reset_index ()
12221236
1223- # print(df_restricted_sites.head(10))
1237+ # Filter df_restricted_sites by CatFIM type
1238+ if is_stage_based == True : # Keep rows where 'catfim_type' is either 'stage' or 'both'
1239+ df_restricted_sites = df_restricted_sites [df_restricted_sites ['catfim_type' ].isin (['stage' , 'both' ])]
1240+
1241+ else : # Keep rows where 'catfim_type' is either 'flow' or 'both'
1242+ df_restricted_sites = df_restricted_sites [df_restricted_sites ['catfim_type' ].isin (['flow' , 'both' ])]
1243+
1244+ # Remove catfim_type column
1245+ df_restricted_sites .drop ('catfim_type' , axis = 1 , inplace = True )
12241246
12251247 return df_restricted_sites
12261248
@@ -1526,6 +1548,7 @@ def generate_stage_based_categorical_fim(
15261548 lst_hucs ,
15271549 nwm_metafile ,
15281550 str (FLOG .LOG_FILE_PATH ),
1551+ df_restricted_sites ,
15291552 )
15301553 )
15311554
0 commit comments