Skip to content

Commit

Permalink
Merge branch 'dev' into dev-lidar-bridge2
Browse files Browse the repository at this point in the history
  • Loading branch information
AliForghani-NOAA committed Jan 22, 2025
2 parents 91aa6d4 + 8e76d85 commit 77e1056
Show file tree
Hide file tree
Showing 31 changed files with 4,319 additions and 1,876 deletions.
74 changes: 37 additions & 37 deletions Pipfile
Original file line number Diff line number Diff line change
Expand Up @@ -4,55 +4,55 @@ url = "https://pypi.org/simple"
verify_ssl = true

[dev-packages]
ipython = "==8.24.0"
ipython = "==8.31.0"

[packages]
black = "==24.3.0"
boto3 = "1.35.1"
certifi = "==2024.7.4"
dask = "==2024.6.2"
dask-expr = "==1.1.6"
distributed = "==2024.6.2"
fiona = "==1.10.0"
flake8 = "==6.0.0"
black = "==24.10.0"
boto3 = "==1.35.87"
certifi = "==2024.12.14"
dask = "==2024.8.0"
dask-expr = "==1.1.10"
distributed = "==2024.8.0"
fiona = "==1.10.1"
flake8 = "==7.1.1"
flake8-pyproject = "==1.2.3"
geopandas = "==1.0.1"
gval = "==0.2.7.post1"
ipympl = "==0.9.3"
isort = "==5.12.0"
gval = "==0.2.9"
ipympl = "==0.9.5"
isort = "==5.13.2"
jupyter = "==1.1.1"
jupyterlab = "==4.2.6"
lmoments3 = "==1.0.6"
monaco = "==0.13.1"
natsort = "==8.3.1"
netcdf4 = "==1.6.3"
jupyterlab = "==4.3.4"
lmoments3 = "==1.0.8"
monaco = "==0.14.0"
natsort = "==8.4.0"
netcdf4 = "==1.7.2"
numba = "==0.60.0"
numpy = "==1.26.4"
openpyxl = "==3.1.2"
osmnx = "==1.9.3"
openpyxl = "==3.1.5"
osmnx = "==2.0.0"
pandas = "==2.0.2"
pillow = "==10.3.0"
pre-commit = "==3.3.3"
psycopg2-binary = "==2.9.6"
pyarrow = "==17.0.0"
pyflwdir = "==0.5.8"
pillow = "==11.0.0"
pre-commit = "==4.0.1"
psycopg2-binary = "==2.9.10"
py7zr = "==0.22.0"
pyarrow = "==18.1.0"
pyflwdir = "==0.5.9"
pyogrio = "==0.8.0"
pyproj = "==3.6.1"
python-dotenv = "==1.0.0"
py7zr = "==0.20.4"
rasterio = "==1.3.10"
rasterstats = "==0.19.0"
pyproj = "==3.7.0"
python-dotenv = "==1.0.1"
rasterio = "==1.4.3"
rasterstats = "==0.20.0"
requests = "==2.32.3"
richdem = "==0.3.4"
rtree = "==1.0.1"
scipy = "==1.14.0"
seaborn = "==0.12.2"
tables = "==3.8.0"
tqdm = "==4.66.3"
urllib3 = "==1.26.19"
rtree = "==1.3.0"
scipy = "==1.14.1"
seaborn = "==0.13.2"
tables = "==3.10.1"
tqdm = "==4.67.1"
urllib3 = "==2.3.0"
whitebox = "==2.3.5"
xarray = "==2024.6.0"
zarr = "==2.18.0"
xarray = "==2024.7.0"
zarr = "==2.18.3"

[requires]
python_version = "3.10"
2,697 changes: 1,045 additions & 1,652 deletions Pipfile.lock

Large diffs are not rendered by default.

917 changes: 917 additions & 0 deletions config/symbology/qgis/catfim_library.qml

Large diffs are not rendered by default.

3 changes: 2 additions & 1 deletion data/bridges/pull_osm_bridges.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,7 @@ def pull_osm_features_by_huc(huc_bridge_file, huc_num, huc_geom):
gdf['railway'] = None

# Create the bridge_type column by combining above information
gdf['HUC'] = huc_num
gdf['bridge_type'] = gdf.apply(
lambda row: (
f"highway-{row['highway']}" if pd.notna(row['highway']) else f"railway-{row['railway']}"
Expand Down Expand Up @@ -187,7 +188,7 @@ def combine_huc_features(output_dir):
section_time = dt.datetime.now(dt.timezone.utc)
logging.info(f" .. started: {section_time.strftime('%m/%d/%Y %H:%M:%S')}")

all_bridges_gdf = all_bridges_gdf_raw[['osmid', 'name', 'bridge_type', 'geometry']]
all_bridges_gdf = all_bridges_gdf_raw[['osmid', 'name', 'bridge_type', 'HUC', 'geometry']]
all_bridges_gdf.to_file(osm_bridge_file, driver="GPKG")

return
Expand Down
1 change: 0 additions & 1 deletion data/create_vrt_file.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
import argparse
import logging
import os
import sys
from datetime import datetime

from osgeo import gdal
Expand Down
81 changes: 81 additions & 0 deletions docs/CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,87 @@
All notable changes to this project will be documented in this file.
We follow the [Semantic Versioning 2.0.0](http://semver.org/) format.

## v4.5.13.7 - 2025-01-10 - [PR#1379](https://github.com/NOAA-OWP/inundation-mapping/pull/1379)

There are many sites in non-CONUS regions (AK, PR, HI) where we would like to run CatFIM but they are being excluded because they are not NWM forecast points. This update brings back the double API pull and adds in some code to filter out duplicate (and NULL) lids from the metadata lists.

### Additions
- `inundation-mapping/tools/catfim/vis_categorical_fim.py`: Functions for reading in, processing, and visualizing CatFIM results.
- `inundation-mapping/tools/catfim/notebooks/vis_catfim_cross_section.ipynb`: A new Jupyter notebook for viewing and analyzing CatFIM results.
- `inundation-mapping/tools/catfim/notebooks/eval_catfim_metadata.ipynb`: A new Jupyter notebook for evaluating metadata and results from CatFIM runs.
- `inundation-mapping/config/symbology/qgis/catfim_library.qml`: Symbology preset for viewing CatFIM library in QGIS.


### Changes

- `inundation-mapping/tools/catfim/generate_categorical_fim_flows.py`: Re-implements the dual API call and filters out duplicate sites.


<br/><br/>

## v4.5.13.6 - 2025-01-10 - [PR#1387](https://github.com/NOAA-OWP/inundation-mapping/pull/1387)

Fixes two issues in test_cases:
1. An error in `synthesize_test_cases` and `run_test_case` if any directories of the 5 benchmark sources (BLE, NWS, IFC, USGS, or ras2fim) do not exist. This issue was originally discovered and fixed in #1178, but is being elevated to its own PR here. Fixes #1386.
2. Updated `run_test_cases` to accommodate levee and waterbody masking in Alaska. As part of these changes, hardcoded paths were replaced by environment variables.

### Changes

- `tools/`
- `run_test_case.py`: Fixed error if missing validation data. Updated masking data to include Alaska.
- `synthesize_test_cases.py`: Fixed error if missing validation data.

<br/><br/>


## v4.5.13.5 - 2025-01-09 - [PR#1389](https://github.com/NOAA-OWP/inundation-mapping/pull/1389)

Updates Python packages to resolve dependency conflicts that were preventing `Dockerfile.dev` from building on Mac. This also resolves two security warnings: https://github.com/NOAA-OWP/inundation-mapping/security/dependabot/51 and https://github.com/NOAA-OWP/inundation-mapping/security/dependabot/52.

### Changes

- `Pipfile` and `Pipfile.lock`: Upgrades Python packages

<br/><br/>


## v4.5.13.4 - 2025-01-03 - [PR#1382](https://github.com/NOAA-OWP/inundation-mapping/pull/1382)

Cleans up Python files within `delineate_hydros_and_produce_HAND.sh` to improve performance, especially memory management, including removing unused imports, deleting object references when objects are no longer needed, and removing GDAL from the `fim_process_unit_wb.sh` step of FIM pipeline. Contributes to #1351 and #1376.

### Changes
- `data/create_vrt_file.py` and `tools/pixel_counter.py`: Removes unused import
- `src/`
- `accumulate_headwaters.py`, `add_crosswalk.py`, `adjust_thalweg_lateral.py`, `filter_catchments_and_add_attributes.py`, `heal_bridges_osm.py`, `make_rem.py`, `make_stages_and_catchlist.py`, `mitigate_branch_outlet_backpool.py`, `reachID_grid_to_vector_points.py`, `split_flows.py`, `unique_pixel_and_allocation.py`: Deletes objects no longer in use
- `delineate_hydros_and_produce_HAND.sh`, `run_by_branch.sh`, `run_unit_wb.sh` : Updates arguments
- `getRasterInfoNative.py`: Refactors in `rasterio` (removed `gdal`)
- `tools/evaluate_crosswalk.py`: Deletes objects no longer in use

<br/><br/>


## v4.5.13.3 - 2025-01-03 - [PR#1048](https://github.com/NOAA-OWP/inundation-mapping/pull/1048)

This script produces inundation depths and attempts to overcome the catchment boundary issue by interpolating water surface elevations between catchments. Water surface calculations require the hydroconditioned DEM (`dem_thalwegCond_{}.tif`) for computation, however, this file is not in the standard outputs from fim_pipeline.sh. Therefore, users may have to re-run fim_pipeline.sh with dem_thalwegCond_{}.tif removed from all deny lists.

### Additions

- `tools/interpolate_water_surface.py`: New post-inundation processing tool for extending depths beyond catchment limits. The `interpolate_wse()` contains the logic for computing the updated depth raster, but users can also call this module directly to perform inundation, similar to how `inundate_mosaic_wrapper.py` works, but with the new post-processing enhancement.

<br/><br/>


## v4.5.13.2 - 2025-01-03 - [PR#1360](https://github.com/NOAA-OWP/inundation-mapping/pull/1360)

Fixed missing osmid in osm_bridge_centroid.gpkg. Also, HUC column is added to outputs.

### Changes
- `data/bridges/pull_osm_bridges.py`
- `src/aggregate_by_huc.py`

<br/><br/>


## v4.5.13.1 - 2024-12-13 - [PR#1361](https://github.com/NOAA-OWP/inundation-mapping/pull/1361)

This PR was triggered by two Dependabot PRs: one for Tornado, one for aiohttp. Upon further research, these two exist only as dependencies for Jupyter and JupyterLab, which were very out of date. Upgrading Jupyter/JupyterLab took care of the other two.
Expand Down
11 changes: 8 additions & 3 deletions src/accumulate_headwaters.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,9 +38,6 @@ def accumulate_flow(
data = src.read(1)
nodata = src.nodata
profile = src.profile
# transform = src.transform
# crs = src.crs
# latlon = crs.to_epsg() == 4326

# Convert the TauDEM flow direction raster to a pyflwdir flow direction array
temp = data.copy()
Expand All @@ -55,17 +52,23 @@ def accumulate_flow(
temp[data == 8] = 2
temp[data == nodata] = 247

del data

temp = temp.astype(np.uint8)

flw = pyflwdir.from_array(temp, ftype='d8')

del temp

# Read the flow direction raster
with rio.open(headwaters_filename) as src:
headwaters = src.read(1)
nodata = src.nodata

flowaccum = flw.accuflux(headwaters, nodata=nodata, direction='up')

del flw

stream = np.where(flowaccum > 0, flow_accumulation_threshold, 0)

# Write the flow accumulation raster
Expand All @@ -76,6 +79,8 @@ def accumulate_flow(
dst.write(flowaccum, 1)
dst2.write(stream, 1)

del flowaccum, stream


if __name__ == '__main__':
parser = argparse.ArgumentParser()
Expand Down
45 changes: 21 additions & 24 deletions src/add_crosswalk.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,38 +29,21 @@ def add_crosswalk(
output_hydro_table_fileName,
input_huc_fileName,
input_nwmflows_fileName,
input_nwmcatras_fileName,
mannings_n,
input_nwmcat_fileName,
extent,
small_segments_filename,
min_catchment_area,
min_stream_length,
huc_id,
calibration_mode=False,
):
input_catchments = gpd.read_file(input_catchments_fileName, engine="pyogrio", use_arrow=True)
input_flows = gpd.read_file(input_flows_fileName, engine="pyogrio", use_arrow=True)
input_huc = gpd.read_file(input_huc_fileName, engine="pyogrio", use_arrow=True)
input_nwmcat = gpd.read_file(input_nwmcat_fileName, engine="pyogrio", use_arrow=True)
input_nwmflows = gpd.read_file(input_nwmflows_fileName, engine="pyogrio", use_arrow=True)
min_catchment_area = float(min_catchment_area) # 0.25#
min_stream_length = float(min_stream_length) # 0.5#

input_catchments = input_catchments.dissolve(by='HydroID').reset_index()

## crosswalk using stream segment midpoint method
input_nwmcat = gpd.read_file(input_nwmcat_fileName, mask=input_huc, engine="fiona")

# only reduce nwm catchments to mainstems if running mainstems
if extent == 'MS':
input_nwmcat = input_nwmcat.loc[input_nwmcat.mainstem == 1]

input_nwmcat = input_nwmcat.rename(columns={'ID': 'feature_id'})
if input_nwmcat.feature_id.dtype != 'int':
input_nwmcat.feature_id = input_nwmcat.feature_id.astype(int)
input_nwmcat = input_nwmcat.set_index('feature_id')

input_nwmflows = input_nwmflows.rename(columns={'ID': 'feature_id'})
if input_nwmflows.feature_id.dtype != 'int':
input_nwmflows.feature_id = input_nwmflows.feature_id.astype(int)
Expand Down Expand Up @@ -89,6 +72,8 @@ def add_crosswalk(
crosswalk = crosswalk.filter(items=['HydroID', 'feature_id', 'distance'])
crosswalk = crosswalk.merge(input_nwmflows[['order_']], on='feature_id')

del input_nwmflows

if crosswalk.empty:
print("No relevant streams within HUC boundaries.")
sys.exit(FIM_exit_codes.NO_VALID_CROSSWALKS.value)
Expand All @@ -97,6 +82,8 @@ def add_crosswalk(
input_catchments.HydroID = input_catchments.HydroID.astype(int)
output_catchments = input_catchments.merge(crosswalk, on='HydroID')

del input_catchments

if output_catchments.empty:
print("No valid catchments remain.")
sys.exit(FIM_exit_codes.NO_VALID_CROSSWALKS.value)
Expand All @@ -105,6 +92,8 @@ def add_crosswalk(
input_flows.HydroID = input_flows.HydroID.astype(int)
output_flows = input_flows.merge(crosswalk, on='HydroID')

del input_flows

# added for GMS. Consider adding filter_catchments_and_add_attributes.py to run_by_branch.sh
if 'areasqkm' not in output_catchments.columns:
output_catchments['areasqkm'] = output_catchments.geometry.area / (1000**2)
Expand Down Expand Up @@ -276,6 +265,9 @@ def add_crosswalk(
input_src_base['Bathymetry_source'] = pd.NA

output_src = input_src_base.drop(columns=['CatchId']).copy()

del input_src_base

if output_src.HydroID.dtype != 'int':
output_src.HydroID = output_src.HydroID.astype(int)

Expand Down Expand Up @@ -304,6 +296,9 @@ def add_crosswalk(
)
merged_output_src = merged_output_src[['HydroID', 'Stage', 'Discharge (m3s-1)_df2']]
output_src = pd.merge(output_src, merged_output_src, on=['HydroID', 'Stage'], how='left')

del merged_output_src

output_src['Discharge (m3s-1)'] = output_src['Discharge (m3s-1)_df2'].fillna(
output_src['Discharge (m3s-1)']
)
Expand All @@ -322,8 +317,12 @@ def add_crosswalk(
['Discharge (m3s-1)'],
] = src_stage[1]

del sml_segs

output_src = output_src.merge(crosswalk[['HydroID', 'feature_id']], on='HydroID')

del crosswalk

output_crosswalk = output_src[['HydroID', 'feature_id']]
output_crosswalk = output_crosswalk.drop_duplicates(ignore_index=True)

Expand Down Expand Up @@ -386,6 +385,8 @@ def add_crosswalk(
input_huc[FIM_ID] = input_huc[FIM_ID].astype(str)
output_hydro_table = output_hydro_table.merge(input_huc.loc[:, [FIM_ID, 'HUC8']], how='left', on=FIM_ID)

del input_huc

if output_flows.HydroID.dtype != 'str':
output_flows.HydroID = output_flows.HydroID.astype(str)
output_hydro_table = output_hydro_table.merge(
Expand Down Expand Up @@ -428,6 +429,8 @@ def add_crosswalk(
with open(output_src_json_fileName, 'w') as f:
json.dump(output_src_json, f, sort_keys=True, indent=2)

del output_catchments, output_flows, output_src, output_crosswalk, output_hydro_table, output_src_json


if __name__ == '__main__':
parser = argparse.ArgumentParser(
Expand All @@ -452,22 +455,16 @@ def add_crosswalk(
parser.add_argument("-t", "--output-hydro-table-fileName", help="Hydrotable", required=True)
parser.add_argument("-w", "--input-huc-fileName", help="HUC8 boundary", required=True)
parser.add_argument("-b", "--input-nwmflows-fileName", help="Subest NWM burnlines", required=True)
parser.add_argument("-y", "--input-nwmcatras-fileName", help="NWM catchment raster", required=False)
parser.add_argument(
"-m",
"--mannings-n",
help="Mannings n. Accepts single parameter set or list of parameter set in calibration mode. Currently input as csv.",
required=True,
)
parser.add_argument("-u", "--huc-id", help="HUC ID", required=False)
parser.add_argument("-z", "--input-nwmcat-fileName", help="NWM catchment polygon", required=True)
parser.add_argument("-p", "--extent", help="GMS only for now", default="GMS", required=False)
parser.add_argument("-u", "--huc-id", help="HUC ID", required=True)
parser.add_argument(
"-k", "--small-segments-filename", help="output list of short segments", required=True
)
parser.add_argument(
"-c", "--calibration-mode", help="Mannings calibration flag", required=False, action="store_true"
)
parser.add_argument("-e", "--min-catchment-area", help="Minimum catchment area", required=True)
parser.add_argument("-g", "--min-stream-length", help="Minimum stream length", required=True)

Expand Down
Loading

0 comments on commit 77e1056

Please sign in to comment.