From 20078643cb76b40daf9cd2d947c59a88d56d1855 Mon Sep 17 00:00:00 2001 From: CatherineThomas-NOAA <59020064+CatherineThomas-NOAA@users.noreply.github.com> Date: Thu, 19 Dec 2024 22:08:50 -0500 Subject: [PATCH 01/33] Update compression options for GEFS history files (#3184) # Description Different compression options are applied to the high-resolution history files. Because quantize_nsd is interpreted differently depending on the value of quantize_mode (significant decimal digits for the bit-grooming modes versus significant bits for bit-rounding), its value must be updated to obtain the same behavior as the previous compression options. This option needs to be updated for both GFS and GEFS. Resolves: #3178 --- parm/config/gefs/config.ufs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parm/config/gefs/config.ufs b/parm/config/gefs/config.ufs index 5b7ba4c0af..e5859bd801 100644 --- a/parm/config/gefs/config.ufs +++ b/parm/config/gefs/config.ufs @@ -268,7 +268,7 @@ case ${fv3_res} in "C384" | "C768" | "C1152" | "C3072") zstandard_level=0 ideflate=1 - quantize_nsd=5 + quantize_nsd=14 OUTPUT_FILETYPE_ATM="netcdf_parallel" if [[ "${fv3_res}" == "C384" ]]; then OUTPUT_FILETYPE_SFC="netcdf" # For C384, the write grid component is better off with serial netcdf From d479111aef597bb0c17ac0af48ddc489f5e13868 Mon Sep 17 00:00:00 2001 From: Cory Martin Date: Fri, 20 Dec 2024 02:18:51 -0500 Subject: [PATCH 02/33] Switch snow DA to use 2DVar for deterministic and ensemble mean (#3163) This PR moves the snow DA from a LETKF-OI approach with a fake ensemble to a 2DVar approach for the deterministic analysis and the ensemble mean for GDAS/GFS. It also adds support for JCB and refactors the jobs to leverage the Jedi class in the workflow. Resolves #3002 --------- Co-authored-by: RussTreadon-NOAA Co-authored-by: Jiarui Dong Co-authored-by: DavidNew-NOAA Co-authored-by: Cory Martin Co-authored-by: Walter Kolczynski - NOAA --- env/HERA.env | 10 +- env/HERCULES.env | 8 +- env/JET.env | 8 +- env/ORION.env | 8 +- env/S4.env | 8 +- env/WCOSS2.env | 8 +- jobs/JGDAS_ENKF_ARCHIVE | 3 +- ...SNOW_RECENTER => JGLOBAL_SNOWENS_ANALYSIS} | 18 +- jobs/JGLOBAL_SNOW_ANALYSIS | 10 +- jobs/rocoto/esnowanl.sh | 26 + jobs/rocoto/esnowrecen.sh | 18 - parm/archive/enkf.yaml.j2 | 8 + parm/archive/gdas_restarta.yaml.j2 | 2 +- parm/config/gfs/config.esnowanl | 38 + parm/config/gfs/config.esnowrecen | 29 - parm/config/gfs/config.resources | 12 +- parm/config/gfs/config.snowanl | 17 +- parm/gdas/esnowanl_jedi_config.yaml.j2 | 14 + parm/gdas/snow_stage_ens_update.yaml.j2 | 38 +- parm/gdas/snow_stage_orog.yaml.j2 | 4 - parm/gdas/snowanl_jedi_config.yaml.j2 | 7 + parm/gdas/staging/snow_berror.yaml.j2 | 4 + parm/gdas/staging/snow_var_bkg.yaml.j2 | 8 + scripts/exgdas_enkf_earc.py | 2 +- scripts/exgdas_enkf_snow_recenter.py | 30 - scripts/exglobal_snow_analysis.py | 23 +- scripts/exglobal_snowens_analysis.py | 43 ++ sorc/gdas.cd | 2 +- sorc/link_workflow.sh | 2 +- ush/python/pygfs/task/snow_analysis.py | 516 +++++--------- ush/python/pygfs/task/snowens_analysis.py | 672 +++++++++--------- versions/fix.ver | 1 + workflow/applications/gfs_cycled.py | 4 +- workflow/rocoto/gfs_tasks.py | 14 +- workflow/rocoto/tasks.py | 2 +- 35 files changed, 782 insertions(+), 835 deletions(-) rename jobs/{JGDAS_ENKF_SNOW_RECENTER => JGLOBAL_SNOWENS_ANALYSIS} (83%) create mode 100755 jobs/rocoto/esnowanl.sh delete mode 100755 jobs/rocoto/esnowrecen.sh create mode 100644 parm/config/gfs/config.esnowanl delete mode 100644 parm/config/gfs/config.esnowrecen create mode 100644 parm/gdas/esnowanl_jedi_config.yaml.j2 create mode 100644
parm/gdas/snowanl_jedi_config.yaml.j2 create mode 100644 parm/gdas/staging/snow_berror.yaml.j2 create mode 100644 parm/gdas/staging/snow_var_bkg.yaml.j2 delete mode 100755 scripts/exgdas_enkf_snow_recenter.py create mode 100755 scripts/exglobal_snowens_analysis.py diff --git a/env/HERA.env b/env/HERA.env index f10bfcc537..051287004b 100755 --- a/env/HERA.env +++ b/env/HERA.env @@ -115,14 +115,16 @@ elif [[ "${step}" = "snowanl" ]]; then export APRUN_CALCFIMS="${launcher} -n 1" export NTHREADS_SNOWANL=${NTHREADSmax} - export APRUN_SNOWANL="${APRUN_default} --cpus-per-task=${NTHREADS_SNOWANL}" + export APRUN_SNOWANL="${APRUN_default} --mem=0 --cpus-per-task=${NTHREADS_SNOWANL}" export APRUN_APPLY_INCR="${launcher} -n 6" -elif [[ "${step}" = "esnowrecen" ]]; then +elif [[ "${step}" = "esnowanl" ]]; then - export NTHREADS_ESNOWRECEN=${NTHREADSmax} - export APRUN_ESNOWRECEN="${APRUN_default} --cpus-per-task=${NTHREADS_ESNOWRECEN}" + export APRUN_CALCFIMS="${launcher} -n 1" + + export NTHREADS_ESNOWANL=${NTHREADSmax} + export APRUN_ESNOWANL="${APRUN_default} --mem=0 --cpus-per-task=${NTHREADS_ESNOWANL}" export APRUN_APPLY_INCR="${launcher} -n 6" diff --git a/env/HERCULES.env b/env/HERCULES.env index 3a59b1992d..acfbe438ef 100755 --- a/env/HERCULES.env +++ b/env/HERCULES.env @@ -118,10 +118,12 @@ case ${step} in export APRUN_APPLY_INCR="${launcher} -n 6" ;; - "esnowrecen") + "esnowanl") - export NTHREADS_ESNOWRECEN=${NTHREADSmax} - export APRUN_ESNOWRECEN="${APRUN_default} --cpus-per-task=${NTHREADS_ESNOWRECEN}" + export APRUN_CALCFIMS="${launcher} -n 1" + + export NTHREADS_ESNOWANL=${NTHREADSmax} + export APRUN_ESNOWANL="${APRUN_default} --cpus-per-task=${NTHREADS_ESNOWANL}" export APRUN_APPLY_INCR="${launcher} -n 6" ;; diff --git a/env/JET.env b/env/JET.env index 6465b69acd..7bfd912062 100755 --- a/env/JET.env +++ b/env/JET.env @@ -102,10 +102,12 @@ elif [[ "${step}" = "snowanl" ]]; then export APRUN_APPLY_INCR="${launcher} -n 6" -elif [[ "${step}" = "esnowrecen" ]]; then +elif [[ "${step}" = "esnowanl" ]]; then - export NTHREADS_ESNOWRECEN=${NTHREADSmax} - export APRUN_ESNOWRECEN="${APRUN_default} --cpus-per-task=${NTHREADS_ESNOWRECEN}" + export APRUN_CALCFIMS="${launcher} -n 1" + + export NTHREADS_ESNOWANL=${NTHREADSmax} + export APRUN_ESNOWANL="${APRUN_default} --cpus-per-task=${NTHREADS_ESNOWANL}" export APRUN_APPLY_INCR="${launcher} -n 6" diff --git a/env/ORION.env b/env/ORION.env index 1dc49e9362..fbe00c153c 100755 --- a/env/ORION.env +++ b/env/ORION.env @@ -109,10 +109,12 @@ elif [[ "${step}" = "snowanl" ]]; then export APRUN_APPLY_INCR="${launcher} -n 6" -elif [[ "${step}" = "esnowrecen" ]]; then +elif [[ "${step}" = "esnowanl" ]]; then - export NTHREADS_ESNOWRECEN=${NTHREADSmax} - export APRUN_ESNOWRECEN="${APRUN_default} --cpus-per-task=${NTHREADS_ESNOWRECEN}" + export APRUN_CALCFIMS="${launcher} -n 1" + + export NTHREADS_ESNOWANL=${NTHREADSmax} + export APRUN_ESNOWANL="${APRUN_default} --cpus-per-task=${NTHREADS_ESNOWANL}" export APRUN_APPLY_INCR="${launcher} -n 6" diff --git a/env/S4.env b/env/S4.env index 9a5baf29ed..39d24e19ec 100755 --- a/env/S4.env +++ b/env/S4.env @@ -102,10 +102,12 @@ elif [[ "${step}" = "snowanl" ]]; then export APRUN_APPLY_INCR="${launcher} -n 6" -elif [[ "${step}" = "esnowrecen" ]]; then +elif [[ "${step}" = "esnowanl" ]]; then - export NTHREADS_ESNOWRECEN=${NTHREADSmax} - export APRUN_ESNOWRECEN="${APRUN_default} --cpus-per-task=${NTHREADS_ESNOWRECEN}" + export APRUN_CALCFIMS="${launcher} -n 1" + + export NTHREADS_ESNOWANL=${NTHREADSmax} + export 
APRUN_ESNOWANL="${APRUN_default} --cpus-per-task=${NTHREADS_ESNOWANL}" export APRUN_APPLY_INCR="${launcher} -n 6" diff --git a/env/WCOSS2.env b/env/WCOSS2.env index 4e8d1ddfea..e787202d66 100755 --- a/env/WCOSS2.env +++ b/env/WCOSS2.env @@ -95,10 +95,12 @@ elif [[ "${step}" = "snowanl" ]]; then export APRUN_APPLY_INCR="${launcher} -n 6" -elif [[ "${step}" = "esnowrecen" ]]; then +elif [[ "${step}" = "esnowanl" ]]; then - export NTHREADS_ESNOWRECEN=${NTHREADSmax} - export APRUN_ESNOWRECEN="${APRUN_default}" + export APRUN_CALCFIMS="${launcher} -n 1" + + export NTHREADS_ESNOWANL=${NTHREADSmax} + export APRUN_ESNOWANL="${APRUN_default}" export APRUN_APPLY_INCR="${launcher} -n 6" diff --git a/jobs/JGDAS_ENKF_ARCHIVE b/jobs/JGDAS_ENKF_ARCHIVE index 29ef9c1812..021c454afc 100755 --- a/jobs/JGDAS_ENKF_ARCHIVE +++ b/jobs/JGDAS_ENKF_ARCHIVE @@ -10,7 +10,8 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "earc" -c "base earc" YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_TOP MEMDIR="ensstat" YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \ COMIN_ATMOS_ANALYSIS_ENSSTAT:COM_ATMOS_ANALYSIS_TMPL \ - COMIN_ATMOS_HISTORY_ENSSTAT:COM_ATMOS_HISTORY_TMPL + COMIN_ATMOS_HISTORY_ENSSTAT:COM_ATMOS_HISTORY_TMPL \ + COMIN_SNOW_ANALYSIS_ENSSTAT:COM_SNOW_ANALYSIS_TMPL ############################################################### # Run archive script diff --git a/jobs/JGDAS_ENKF_SNOW_RECENTER b/jobs/JGLOBAL_SNOWENS_ANALYSIS similarity index 83% rename from jobs/JGDAS_ENKF_SNOW_RECENTER rename to jobs/JGLOBAL_SNOWENS_ANALYSIS index 05d46cffc2..ca23347bca 100755 --- a/jobs/JGDAS_ENKF_SNOW_RECENTER +++ b/jobs/JGLOBAL_SNOWENS_ANALYSIS @@ -1,7 +1,7 @@ #! /usr/bin/env bash source "${HOMEgfs}/ush/preamble.sh" -source "${HOMEgfs}/ush/jjob_header.sh" -e "esnowrecen" -c "base esnowrecen" +source "${HOMEgfs}/ush/jjob_header.sh" -e "esnowanl" -c "base esnowanl" ############################################## # Set variables used in the script @@ -10,19 +10,18 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "esnowrecen" -c "base esnowrecen" # shellcheck disable=SC2153 GDUMP="gdas" export GDUMP +CDUMP=${RUN/enkf} +export CDUMP ############################################## # Begin JOB SPECIFIC work ############################################## # Generate COM variables from templates +RUN=${CDUMP} YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \ + COMIN_OBS:COM_OBS_TMPL YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \ - COMIN_OBS:COM_OBS_TMPL \ COMOUT_ATMOS_ANALYSIS:COM_ATMOS_ANALYSIS_TMPL \ COMOUT_CONF:COM_CONF_TMPL -MEMDIR="ensstat" YMD=${PDY} HH=${cyc} declare_from_tmpl \ - COMOUT_SNOW_ANALYSIS:COM_SNOW_ANALYSIS_TMPL - -mkdir -p "${COMOUT_SNOW_ANALYSIS}" "${COMOUT_CONF}" for imem in $(seq 1 "${NMEM_ENS}"); do memchar="mem$(printf %03i "${imem}")" @@ -31,10 +30,15 @@ for imem in $(seq 1 "${NMEM_ENS}"); do mkdir -p "${COMOUT_SNOW_ANALYSIS}" done +MEMDIR="ensstat" YMD=${PDY} HH=${cyc} declare_from_tmpl -x\ + COMOUT_SNOW_ANALYSIS:COM_SNOW_ANALYSIS_TMPL + +mkdir -p "${COMOUT_SNOW_ANALYSIS}" "${COMOUT_CONF}" + ############################################################### # Run relevant script -EXSCRIPT=${SNOWANLPY:-${SCRgfs}/exgdas_enkf_snow_recenter.py} +EXSCRIPT=${SNOWANLPY:-${SCRgfs}/exglobal_snowens_analysis.py} ${EXSCRIPT} status=$? (( status != 0 )) && exit "${status}" diff --git a/jobs/JGLOBAL_SNOW_ANALYSIS b/jobs/JGLOBAL_SNOW_ANALYSIS index e0f24fa624..1642042b89 100755 --- a/jobs/JGLOBAL_SNOW_ANALYSIS +++ b/jobs/JGLOBAL_SNOW_ANALYSIS @@ -1,7 +1,6 @@ #! 
/usr/bin/env bash source "${HOMEgfs}/ush/preamble.sh" -export DATA=${DATA:-${DATAROOT}/${RUN}snowanl_${cyc}} source "${HOMEgfs}/ush/jjob_header.sh" -e "snowanl" -c "base snowanl" ############################################## @@ -18,12 +17,15 @@ GDUMP="gdas" # Begin JOB SPECIFIC work ############################################## # Generate COM variables from templates -YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_OBS COM_SNOW_ANALYSIS COM_CONF +YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \ + COMIN_OBS:COM_OBS_TMPL \ + COMOUT_SNOW_ANALYSIS:COM_SNOW_ANALYSIS_TMPL \ + COMOUT_CONF:COM_CONF_TMPL RUN=${GDUMP} YMD=${gPDY} HH=${gcyc} declare_from_tmpl -rx \ - COM_ATMOS_RESTART_PREV:COM_ATMOS_RESTART_TMPL + COMIN_ATMOS_RESTART_PREV:COM_ATMOS_RESTART_TMPL -mkdir -m 775 -p "${COM_SNOW_ANALYSIS}" "${COM_CONF}" +mkdir -m 775 -p "${COMOUT_SNOW_ANALYSIS}" "${COMOUT_CONF}" ############################################################### # Run relevant script diff --git a/jobs/rocoto/esnowanl.sh b/jobs/rocoto/esnowanl.sh new file mode 100755 index 0000000000..a6a87f8492 --- /dev/null +++ b/jobs/rocoto/esnowanl.sh @@ -0,0 +1,26 @@ +#! /usr/bin/env bash + +source "${HOMEgfs}/ush/preamble.sh" + +############################################################### +# Source UFSDA workflow modules +. "${HOMEgfs}/ush/load_ufsda_modules.sh" +status=$? +[[ ${status} -ne 0 ]] && exit "${status}" + +export job="esnowanl" +export jobid="${job}.$$" + +############################################################### +# setup python path for ioda utilities +# shellcheck disable=SC2311 +pyiodaPATH="${HOMEgfs}/sorc/gdas.cd/build/lib/python$(detect_py_ver)/" +gdasappPATH="${HOMEgfs}/sorc/gdas.cd/sorc/iodaconv/src:${pyiodaPATH}" +PYTHONPATH="${PYTHONPATH:+${PYTHONPATH}:}:${gdasappPATH}" +export PYTHONPATH + +############################################################### +# Execute the JJOB +"${HOMEgfs}/jobs/JGLOBAL_SNOWENS_ANALYSIS" +status=$? +exit "${status}" diff --git a/jobs/rocoto/esnowrecen.sh b/jobs/rocoto/esnowrecen.sh deleted file mode 100755 index f8c4f8f7fc..0000000000 --- a/jobs/rocoto/esnowrecen.sh +++ /dev/null @@ -1,18 +0,0 @@ -#! /usr/bin/env bash - -source "${HOMEgfs}/ush/preamble.sh" - -############################################################### -# Source UFSDA workflow modules -. "${HOMEgfs}/ush/load_ufsda_modules.sh" -status=$? -[[ ${status} -ne 0 ]] && exit "${status}" - -export job="esnowrecen" -export jobid="${job}.$$" - -############################################################### -# Execute the JJOB -"${HOMEgfs}/jobs/JGDAS_ENKF_SNOW_RECENTER" -status=$? 
-exit "${status}" diff --git a/parm/archive/enkf.yaml.j2 b/parm/archive/enkf.yaml.j2 index f5662bc687..9f9ad296f8 100644 --- a/parm/archive/enkf.yaml.j2 +++ b/parm/archive/enkf.yaml.j2 @@ -70,6 +70,14 @@ enkf: - "{{ COMIN_ATMOS_ANALYSIS_ENSSTAT | relpath(ROTDIR) }}/{{ head }}{{ file }}" {% endfor %} + {% if DO_JEDISNOWDA %} + - "{{ COMIN_SNOW_ANALYSIS_ENSSTAT | relpath(ROTDIR) }}/{{ head }}snowstat.tgz" + {% for itile in range(1,7) %} + # Snow analysis is 3dvar + - "{{ COMIN_SNOW_ANALYSIS_ENSSTAT | relpath(ROTDIR) }}/snowinc.{{ cycle_YMD }}.{{ cycle_HH }}0000.sfc_data.tile{{ itile }}.nc" + {% endfor %} + {% endif %} + # Ensemble mean analyses/increments # 6-hr analysis/increment {% if do_calc_increment %} diff --git a/parm/archive/gdas_restarta.yaml.j2 b/parm/archive/gdas_restarta.yaml.j2 index fc5ce9478d..824010a0ee 100644 --- a/parm/archive/gdas_restarta.yaml.j2 +++ b/parm/archive/gdas_restarta.yaml.j2 @@ -47,7 +47,7 @@ gdas_restarta: # Snow configuration yaml {% if DO_JEDISNOWDA %} - - "{{ COMIN_CONF | relpath(ROTDIR) }}/{{ head }}letkfoi.yaml" + - "{{ COMIN_CONF | relpath(ROTDIR) }}/{{ head }}snowanlvar.yaml" {% endif %} # Input BUFR files diff --git a/parm/config/gfs/config.esnowanl b/parm/config/gfs/config.esnowanl new file mode 100644 index 0000000000..dde8970482 --- /dev/null +++ b/parm/config/gfs/config.esnowanl @@ -0,0 +1,38 @@ +#! /usr/bin/env bash + +########## config.esnowanl ########## +# configuration common to snow ensemble analysis tasks + +echo "BEGIN: config.esnowanl" + +# Get task specific resources +source "${EXPDIR}/config.resources" esnowanl + +export OBS_LIST="${PARMgfs}/gdas/snow/obs/lists/gdas_snow.yaml.j2" +export GTS_SNOW_STAGE_YAML="${PARMgfs}/gdas/snow/obs/config/bufr2ioda_mapping.yaml.j2" + +export JCB_BASE_YAML="${PARMgfs}/gdas/snow/jcb-base.yaml.j2" +export JCB_ALGO_YAML_VAR="${PARMgfs}/gdas/snow/jcb-prototype_2dvar.yaml.j2" + +# Process IMS snowcover into snow depth +export IMS_OBS_LIST="${PARMgfs}/gdas/snow/prep/prep_ims.yaml.j2" +export CALCFIMSEXE="${EXECgfs}/calcfIMS.exe" +export FIMS_NML_TMPL="${PARMgfs}/gdas/snow/prep/fims.nml.j2" +export IMS2IODACONV="${USHgfs}/imsfv3_scf2ioda.py" + +export JEDI_FIX_YAML="${PARMgfs}/gdas/snow_jedi_fix.yaml.j2" +export BERROR_STAGING_YAML="${PARMgfs}/gdas/staging/snow_berror.yaml.j2" +export SNOW_ENS_STAGE_TMPL="${PARMgfs}/gdas/snow_stage_ens_update.yaml.j2" +export SNOW_OROG_STAGE_TMPL="${PARMgfs}/gdas/snow_stage_orog.yaml.j2" +export SNOW_ENS_FINALIZE_TMPL="${PARMgfs}/gdas/snow_finalize_ens_update.yaml.j2" + +# Name of the executable that applies increment to bkg and its namelist template +export APPLY_INCR_EXE="${EXECgfs}/apply_incr.exe" +export ENS_APPLY_INCR_NML_TMPL="${PARMgfs}/gdas/snow/ens_apply_incr_nml.j2" + +export JEDI_CONFIG_YAML="${PARMgfs}/gdas/esnowanl_jedi_config.yaml.j2" + +export io_layout_x=@IO_LAYOUT_X@ +export io_layout_y=@IO_LAYOUT_Y@ + +echo "END: config.esnowanl" diff --git a/parm/config/gfs/config.esnowrecen b/parm/config/gfs/config.esnowrecen deleted file mode 100644 index adb039559a..0000000000 --- a/parm/config/gfs/config.esnowrecen +++ /dev/null @@ -1,29 +0,0 @@ -#! 
/usr/bin/env bash - -########## config.esnowrecen ########## -# configuration common to snow ensemble analysis tasks - -echo "BEGIN: config.esnowrecen" - -# Get task specific resources -source "${EXPDIR}/config.resources" esnowrecen - -export JCB_BASE_YAML="${PARMgfs}/gdas/snow/jcb-base.yaml.j2" -export JCB_ALGO_YAML="${PARMgfs}/gdas/snow/jcb-fv3jedi_land_ensrecenter.yaml.j2" - -export JEDI_FIX_YAML="${PARMgfs}/gdas/atm_jedi_fix.yaml.j2" -export SNOW_ENS_STAGE_TMPL="${PARMgfs}/gdas/snow_stage_ens_update.yaml.j2" -export SNOW_OROG_STAGE_TMPL="${PARMgfs}/gdas/snow_stage_orog.yaml.j2" -export SNOW_ENS_FINALIZE_TMPL="${PARMgfs}/gdas/snow_finalize_ens_update.yaml.j2" - -# Name of the executable that applies increment to bkg and its namelist template -export APPLY_INCR_EXE="${EXECgfs}/apply_incr.exe" -export ENS_APPLY_INCR_NML_TMPL="${PARMgfs}/gdas/snow/letkfoi/ens_apply_incr_nml.j2" - -export io_layout_x=@IO_LAYOUT_X@ -export io_layout_y=@IO_LAYOUT_Y@ - -export JEDIEXE=${EXECgfs}/gdasapp_land_ensrecenter.x -export FREGRID=${EXECgfs}/fregrid.x - -echo "END: config.esnowrecen" diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources index e642082290..230872b8f3 100644 --- a/parm/config/gfs/config.resources +++ b/parm/config/gfs/config.resources @@ -15,7 +15,7 @@ if (( $# != 1 )); then echo "prep prepatmiodaobs" echo "atmanlinit atmanlvar atmanlfv3inc atmanlfinal" echo "atmensanlinit atmensanlobs atmensanlsol atmensanlletkf atmensanlfv3inc atmensanlfinal" - echo "snowanl esnowrecen" + echo "snowanl esnowanl" echo "prepobsaero aeroanlinit aeroanlvar aeroanlfinal aeroanlgenb" echo "anal sfcanl analcalc analdiag fcst echgres" echo "upp atmos_products" @@ -346,11 +346,11 @@ case ${step} in walltime="00:15:00" ntasks=$(( layout_x * layout_y * 6 )) - threads_per_task=1 + threads_per_task=2 tasks_per_node=$(( max_tasks_per_node / threads_per_task )) ;; - "esnowrecen") + "esnowanl") # below lines are for creating JEDI YAML case ${CASE} in "C768") @@ -373,9 +373,9 @@ case ${step} in export layout_x export layout_y - walltime="00:15:00" + walltime="00:30:00" ntasks=$(( layout_x * layout_y * 6 )) - threads_per_task=1 + threads_per_task=2 tasks_per_node=$(( max_tasks_per_node / threads_per_task )) ;; @@ -1213,7 +1213,7 @@ case ${step} in ;; "esfc") - walltime="00:15:00" + walltime="00:25:00" ntasks=80 threads_per_task=1 tasks_per_node=$(( max_tasks_per_node / threads_per_task )) diff --git a/parm/config/gfs/config.snowanl b/parm/config/gfs/config.snowanl index 1aeaf58e46..67a4fc012f 100644 --- a/parm/config/gfs/config.snowanl +++ b/parm/config/gfs/config.snowanl @@ -11,13 +11,8 @@ source "${EXPDIR}/config.resources" snowanl export OBS_LIST="${PARMgfs}/gdas/snow/obs/lists/gdas_snow.yaml.j2" export GTS_SNOW_STAGE_YAML="${PARMgfs}/gdas/snow/obs/config/bufr2ioda_mapping.yaml.j2" -# Name of the JEDI executable and its yaml template -export JEDIEXE="${EXECgfs}/gdas.x" -export JEDIYAML="${PARMgfs}/gdas/snow/letkfoi/letkfoi.yaml.j2" - -# Ensemble member properties -export SNOWDEPTHVAR="snodl" -export BESTDDEV="30." # Background Error Std. Dev. 
for LETKFOI +export JCB_BASE_YAML="${PARMgfs}/gdas/snow/jcb-base.yaml.j2" +export JCB_ALGO_YAML_VAR="${PARMgfs}/gdas/snow/jcb-prototype_2dvar.yaml.j2" # Process IMS snowcover into snow depth export IMS_OBS_LIST="${PARMgfs}/gdas/snow/prep/prep_ims.yaml.j2" @@ -27,9 +22,15 @@ export IMS2IODACONV="${USHgfs}/imsfv3_scf2ioda.py" # Name of the executable that applies increment to bkg and its namelist template export APPLY_INCR_EXE="${EXECgfs}/apply_incr.exe" -export APPLY_INCR_NML_TMPL="${PARMgfs}/gdas/snow/letkfoi/apply_incr_nml.j2" +export APPLY_INCR_NML_TMPL="${PARMgfs}/gdas/snow/apply_incr_nml.j2" export JEDI_FIX_YAML="${PARMgfs}/gdas/snow_jedi_fix.yaml.j2" +export VAR_BKG_STAGING_YAML="${PARMgfs}/gdas/staging/snow_var_bkg.yaml.j2" +export BERROR_STAGING_YAML="${PARMgfs}/gdas/staging/snow_berror.yaml.j2" + +export JEDI_CONFIG_YAML="${PARMgfs}/gdas/snowanl_jedi_config.yaml.j2" + +export JEDIEXE=${EXECgfs}/gdas.x export io_layout_x=@IO_LAYOUT_X@ export io_layout_y=@IO_LAYOUT_Y@ diff --git a/parm/gdas/esnowanl_jedi_config.yaml.j2 b/parm/gdas/esnowanl_jedi_config.yaml.j2 new file mode 100644 index 0000000000..ee0909f6db --- /dev/null +++ b/parm/gdas/esnowanl_jedi_config.yaml.j2 @@ -0,0 +1,14 @@ +esnowanlensmean: + rundir: '{{ DATA }}' + exe_src: '{{ EXECgfs }}/gdas.x' + mpi_cmd: '{{ APRUN_ESNOWANL }}' + jedi_args: ['fv3jedi', 'ensmean'] + jcb_base_yaml: '{{ PARMgfs }}/gdas/snow/jcb-base.yaml.j2' + jcb_algo: 'fv3jedi_snow_ensmean' +snowanlvar: + rundir: '{{ DATA }}' + exe_src: '{{ EXECgfs }}/gdas.x' + mpi_cmd: '{{ APRUN_ESNOWANL }}' + jedi_args: ['fv3jedi', 'variational'] + jcb_base_yaml: '{{ PARMgfs }}/gdas/snow/jcb-base.yaml.j2' + jcb_algo_yaml: '{{ JCB_ALGO_YAML_VAR }}' \ No newline at end of file diff --git a/parm/gdas/snow_stage_ens_update.yaml.j2 b/parm/gdas/snow_stage_ens_update.yaml.j2 index 4ad5499751..d8b1d42d00 100644 --- a/parm/gdas/snow_stage_ens_update.yaml.j2 +++ b/parm/gdas/snow_stage_ens_update.yaml.j2 @@ -10,45 +10,15 @@ # create working directories ###################################### mkdir: -- "{{ DATA }}/bkg/det" -- "{{ DATA }}/bkg/det_ensres" -- "{{ DATA }}/inc/det" -- "{{ DATA }}/inc/det_ensres" -- "{{ DATA }}//inc/ensmean" +- "{{ DATA }}/obs" +- "{{ DATA }}/bkg/ensmean" +- "{{ DATA }}/anl/ensmean" {% for mem in range(1, NMEM_ENS + 1) %} - "{{ DATA }}/bkg/mem{{ '%03d' % mem }}" - "{{ DATA }}/anl/mem{{ '%03d' % mem }}" {% endfor %} copy: ###################################### -# copy deterministic background files -###################################### -# define variables -# Declare a dict of search and replace terms to run on each template -{% set tmpl_dict = {'${ROTDIR}':ROTDIR, - '${RUN}':GDUMP, - '${YMD}':previous_cycle | to_YMD, - '${HH}':previous_cycle | strftime("%H"), - '${MEMDIR}':""} %} - -{% for tile in range(1, ntiles+1) %} -- ["{{ COM_ATMOS_RESTART_TMPL | replace_tmpl(tmpl_dict) }}/{{ bkg_time }}.sfc_data.tile{{ tile }}.nc", "{{ DATA }}/bkg/det/{{ bkg_time }}.sfc_data.tile{{ tile }}.nc"] -{% endfor %} -###################################### -# copy deterministic increment files -###################################### -# define variables -# Declare a dict of search and replace terms to run on each template -{% set tmpl_dict = {'${ROTDIR}':ROTDIR, - '${RUN}':GDUMP, - '${YMD}':current_cycle | to_YMD, - '${HH}':current_cycle | strftime("%H"), - '${MEMDIR}':""} %} - -{% for tile in range(1, ntiles+1) %} -- ["{{ COM_SNOW_ANALYSIS_TMPL | replace_tmpl(tmpl_dict) }}/snowinc.{{ current_cycle | to_fv3time }}.sfc_data.tile{{ tile }}.nc", "{{ DATA }}/inc/det/snowinc.{{ 
bkg_time }}.sfc_data.tile{{ tile }}.nc"] -{% endfor %} -###################################### # copy ensemble background files ###################################### {% for mem in range(1, NMEM_ENS + 1) %} @@ -60,6 +30,8 @@ copy: '${HH}':previous_cycle | strftime("%H"), '${MEMDIR}':"mem" + '%03d' % mem} %} + # copy coupler file +- ["{{ COM_ATMOS_RESTART_TMPL | replace_tmpl(tmpl_dict) }}/{{ current_cycle | to_fv3time }}.coupler.res", "{{ DATA }}/bkg/mem{{ '%03d' % mem }}/{{ current_cycle | to_fv3time }}.coupler.res"] # we need to copy them to two places, one serves as the basis for the analysis {% for tile in range(1, ntiles+1) %} - ["{{ COM_ATMOS_RESTART_TMPL | replace_tmpl(tmpl_dict) }}/{{ current_cycle | to_fv3time }}.sfc_data.tile{{ tile }}.nc", "{{ DATA }}/bkg/mem{{ '%03d' % mem }}/{{ current_cycle | to_fv3time }}.sfc_data.tile{{ tile }}.nc"] diff --git a/parm/gdas/snow_stage_orog.yaml.j2 b/parm/gdas/snow_stage_orog.yaml.j2 index 3cd7d5c327..f915b36d1f 100644 --- a/parm/gdas/snow_stage_orog.yaml.j2 +++ b/parm/gdas/snow_stage_orog.yaml.j2 @@ -1,12 +1,8 @@ mkdir: -- "{{ DATA }}/orog/det" - "{{ DATA }}/orog/ens" copy: -- ["{{ FIXorog }}/{{ CASE }}/{{ CASE }}_mosaic.nc", "{{ DATA }}/orog/det/{{ CASE }}_mosaic.nc"] - ["{{ FIXorog }}/{{ CASE_ENS }}/{{ CASE_ENS }}_mosaic.nc", "{{ DATA }}/orog/ens/{{ CASE_ENS }}_mosaic.nc"] {% for tile in range(1, ntiles+1) %} -- ["{{ FIXorog }}/{{ CASE }}/{{ CASE }}_grid.tile{{ tile }}.nc", "{{ DATA }}/orog/det/{{ CASE }}_grid.tile{{ tile }}.nc"] - ["{{ FIXorog }}/{{ CASE_ENS }}/{{ CASE_ENS }}_grid.tile{{ tile }}.nc", "{{ DATA }}/orog/ens/{{ CASE_ENS }}_grid.tile{{ tile }}.nc"] -- ["{{ FIXorog }}/{{ CASE }}/{{ CASE }}.mx{{ OCNRES }}_oro_data.tile{{ tile }}.nc", "{{ DATA }}/orog/det/{{ CASE }}.mx{{ OCNRES }}_oro_data.tile{{ tile }}.nc" ] - ["{{ FIXorog }}/{{ CASE_ENS }}/{{ CASE_ENS }}.mx{{ OCNRES }}_oro_data.tile{{ tile }}.nc", "{{ DATA }}/orog/ens/{{ CASE_ENS }}.mx{{ OCNRES }}_oro_data.tile{{ tile }}.nc" ] {% endfor %} diff --git a/parm/gdas/snowanl_jedi_config.yaml.j2 b/parm/gdas/snowanl_jedi_config.yaml.j2 new file mode 100644 index 0000000000..c599787592 --- /dev/null +++ b/parm/gdas/snowanl_jedi_config.yaml.j2 @@ -0,0 +1,7 @@ +snowanlvar: + rundir: '{{ DATA }}' + exe_src: '{{ EXECgfs }}/gdas.x' + mpi_cmd: '{{ APRUN_SNOWANL }}' + jedi_args: ['fv3jedi', 'variational'] + jcb_base_yaml: '{{ PARMgfs }}/gdas/snow/jcb-base.yaml.j2' + jcb_algo_yaml: '{{ JCB_ALGO_YAML_VAR }}' \ No newline at end of file diff --git a/parm/gdas/staging/snow_berror.yaml.j2 b/parm/gdas/staging/snow_berror.yaml.j2 new file mode 100644 index 0000000000..e230217300 --- /dev/null +++ b/parm/gdas/staging/snow_berror.yaml.j2 @@ -0,0 +1,4 @@ +mkdir: +- '{{ DATA }}/berror' +copy: +- ['{{ HOMEgfs }}/fix/gdas/snow/snow_bump_nicas_250km_shadowlevels_nicas.nc', '{{ DATA }}/berror'] diff --git a/parm/gdas/staging/snow_var_bkg.yaml.j2 b/parm/gdas/staging/snow_var_bkg.yaml.j2 new file mode 100644 index 0000000000..164fb3945e --- /dev/null +++ b/parm/gdas/staging/snow_var_bkg.yaml.j2 @@ -0,0 +1,8 @@ +mkdir: +- '{{ DATA }}/bkg' +copy: +- ['{{ COMIN_ATMOS_RESTART_PREV }}/{{ current_cycle | to_fv3time }}.coupler.res', '{{ DATA }}/bkg/'] +{% for tile in range(1, ntiles+1) %} +- ['{{ COMIN_ATMOS_RESTART_PREV }}/{{ current_cycle | to_fv3time }}.sfc_data.tile{{ tile }}.nc', '{{ DATA }}/bkg/'] +- ["{{ FIXorog }}/{{ CASE }}/{{ CASE }}.mx{{ OCNRES }}_oro_data.tile{{ tile }}.nc", "{{ DATA }}/bkg/{{ CASE }}.mx{{ OCNRES }}_oro_data.tile{{ tile }}.nc" ] +{% endfor %} \ No newline at end of file diff --git 
a/scripts/exgdas_enkf_earc.py b/scripts/exgdas_enkf_earc.py index 467cfa88dc..535dd2ea37 100755 --- a/scripts/exgdas_enkf_earc.py +++ b/scripts/exgdas_enkf_earc.py @@ -26,7 +26,7 @@ def main(): 'FHOUT_ENKF_GFS', 'FHMAX_ENKF', 'FHOUT_ENKF', 'ENKF_SPREAD', 'restart_interval_enkfgdas', 'restart_interval_enkfgfs', 'DOHYBVAR', 'DOIAU_ENKF', 'IAU_OFFSET', 'DOIAU', 'DO_CA', - 'DO_CALC_INCREMENT', 'assim_freq', 'ARCH_CYC', + 'DO_CALC_INCREMENT', 'assim_freq', 'ARCH_CYC', 'DO_JEDISNOWDA', 'ARCH_WARMICFREQ', 'ARCH_FCSTICFREQ', 'IAUFHRS_ENKF', 'NET'] diff --git a/scripts/exgdas_enkf_snow_recenter.py b/scripts/exgdas_enkf_snow_recenter.py deleted file mode 100755 index fcd501860c..0000000000 --- a/scripts/exgdas_enkf_snow_recenter.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python3 -# exgdas_enkf_snow_recenter.py -# This script creates an SnowEnsAnalysis class -# and will recenter the ensemble mean to the -# deterministic analysis and provide increments -# to create an ensemble of snow analyses -import os - -from wxflow import Logger, cast_strdict_as_dtypedict -from pygfs.task.snowens_analysis import SnowEnsAnalysis - -# Initialize root logger -logger = Logger(level=os.environ.get("LOGGING_LEVEL", "DEBUG"), colored_log=True) - - -if __name__ == '__main__': - - # Take configuration from environment and cast it as python dictionary - config = cast_strdict_as_dtypedict(os.environ) - - # Instantiate the snow ensemble analysis task - anl = SnowEnsAnalysis(config) - anl.initialize() - anl.genWeights() - anl.genMask() - anl.regridDetBkg() - anl.regridDetInc() - anl.recenterEns() - anl.addEnsIncrements() - anl.finalize() diff --git a/scripts/exglobal_snow_analysis.py b/scripts/exglobal_snow_analysis.py index dd52b699dc..df2c17530d 100755 --- a/scripts/exglobal_snow_analysis.py +++ b/scripts/exglobal_snow_analysis.py @@ -18,9 +18,20 @@ config = cast_strdict_as_dtypedict(os.environ) # Instantiate the snow analysis task - anl = SnowAnalysis(config) - if anl.task_config.cyc == 0: - anl.prepare_IMS() - anl.initialize() - anl.execute() - anl.finalize() + snow_anl = SnowAnalysis(config) + + # Initialize JEDI 2DVar snow analysis + snow_anl.initialize() + + # Process IMS snow cover (if applicable) + if snow_anl.task_config.cyc == 0: + snow_anl.prepare_IMS() + + # Execute JEDI snow analysis + snow_anl.execute('snowanlvar') + + # Add increments + snow_anl.add_increments() + + # Finalize JEDI snow analysis + snow_anl.finalize() diff --git a/scripts/exglobal_snowens_analysis.py b/scripts/exglobal_snowens_analysis.py new file mode 100755 index 0000000000..6e02fd0a16 --- /dev/null +++ b/scripts/exglobal_snowens_analysis.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 +# exglobal_snowens_analysis.py +# This script creates an SnowEnsAnalysis class, +# which will compute the ensemble mean of the snow forecast, +# run a 2DVar analysis, and provide increments +# to create an ensemble of snow analyses +import os + +from wxflow import Logger, cast_strdict_as_dtypedict +from pygfs.task.snowens_analysis import SnowEnsAnalysis + +# Initialize root logger +logger = Logger(level=os.environ.get("LOGGING_LEVEL", "DEBUG"), colored_log=True) + + +if __name__ == '__main__': + + # Take configuration from environment and cast it as python dictionary + config = cast_strdict_as_dtypedict(os.environ) + + # Instantiate the snow ensemble analysis task + snow_ens_anl = SnowEnsAnalysis(config) + + # Initialize JEDI 2DVar snow analysis + snow_ens_anl.initialize() + + # Calculate ensemble mean + snow_ens_anl.execute('esnowanlensmean') + + # stage 
ensemble mean backgrounds + + # Process IMS snow cover (if applicable) + if snow_ens_anl.task_config.cyc == 0: + snow_ens_anl.prepare_IMS() + + # Execute JEDI snow analysis + snow_ens_anl.execute('snowanlvar') + + # Add increments + snow_ens_anl.add_increments() + + # Finalize JEDI snow analysis + snow_ens_anl.finalize() diff --git a/sorc/gdas.cd b/sorc/gdas.cd index a2ea3770ae..d6097afdd4 160000 --- a/sorc/gdas.cd +++ b/sorc/gdas.cd @@ -1 +1 @@ -Subproject commit a2ea3770aeb9d4308bde51bb1d8c9c94cc9534c8 +Subproject commit d6097afdd435fe73cc99d8ddb594c3143b72820a diff --git a/sorc/link_workflow.sh b/sorc/link_workflow.sh index a89f070d41..b70b9e894f 100755 --- a/sorc/link_workflow.sh +++ b/sorc/link_workflow.sh @@ -209,7 +209,7 @@ if [[ -d "${HOMEgfs}/sorc/gdas.cd" ]]; then cd "${HOMEgfs}/fix" || exit 1 [[ ! -d gdas ]] && mkdir -p gdas cd gdas || exit 1 - for gdas_sub in fv3jedi gsibec obs soca aero; do + for gdas_sub in fv3jedi gsibec obs soca aero snow; do if [[ -d "${gdas_sub}" ]]; then rm -rf "${gdas_sub}" fi diff --git a/ush/python/pygfs/task/snow_analysis.py b/ush/python/pygfs/task/snow_analysis.py index 4b991d2b34..4e04799f3f 100644 --- a/ush/python/pygfs/task/snow_analysis.py +++ b/ush/python/pygfs/task/snow_analysis.py @@ -2,39 +2,59 @@ import os from logging import getLogger -from typing import Dict, List +from typing import Dict, List, Optional, Any from pprint import pformat +import glob +import gzip +import tarfile import numpy as np from netCDF4 import Dataset from wxflow import (AttrDict, FileHandler, to_fv3time, to_YMD, to_YMDH, to_timedelta, add_to_datetime, - rm_p, + rm_p, cp, parse_j2yaml, save_as_yaml, Jinja, + Task, logit, Executable, WorkflowException) -from pygfs.task.analysis import Analysis +from pygfs.jedi import Jedi logger = getLogger(__name__.split('.')[-1]) -class SnowAnalysis(Analysis): +class SnowAnalysis(Task): """ - Class for global snow analysis tasks + Class for JEDI-based global snow analysis tasks """ - NMEM_SNOWENS = 2 - @logit(logger, name="SnowAnalysis") - def __init__(self, config): + def __init__(self, config: Dict[str, Any]): + """Constructor global snow analysis task + + This method will construct a global snow analysis task. 
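+
+        A hedged usage sketch (the application key comes from the
+        snowanl_jedi_config.yaml.j2 template above; `config` is the
+        environment dict cast by the driver script):
+
+            anl = SnowAnalysis(config)
+            anl.jedi_dict['snowanlvar']   # Jedi object wrapping "gdas.x fv3jedi variational"
+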
+ This includes: + - extending the task_config attribute AttrDict to include parameters required for this task + - instantiate the Jedi attribute object + + Parameters + ---------- + config: Dict + dictionary object containing task configuration + + Returns + ---------- + None + """ super().__init__(config) _res = int(self.task_config['CASE'][1:]) _window_begin = add_to_datetime(self.task_config.current_cycle, -to_timedelta(f"{self.task_config['assim_freq']}H") / 2) - _letkfoi_yaml = os.path.join(self.task_config.DATA, f"{self.task_config.RUN}.t{self.task_config['cyc']:02d}z.letkfoi.yaml") + + # fix ocnres + self.task_config.OCNRES = f"{self.task_config.OCNRES:03d}" # Create a local dictionary that is repeatedly used across this class local_dict = AttrDict( @@ -47,13 +67,82 @@ def __init__(self, config): 'SNOW_WINDOW_LENGTH': f"PT{self.task_config['assim_freq']}H", 'OPREFIX': f"{self.task_config.RUN}.t{self.task_config.cyc:02d}z.", 'APREFIX': f"{self.task_config.RUN}.t{self.task_config.cyc:02d}z.", - 'jedi_yaml': _letkfoi_yaml + 'GPREFIX': f"gdas.t{self.task_config.previous_cycle.hour:02d}z.", + 'snow_obsdatain_path': os.path.join(self.task_config.DATA, 'obs'), + 'snow_obsdataout_path': os.path.join(self.task_config.DATA, 'diags'), + 'snow_bkg_path': os.path.join('.', 'bkg/'), } ) # Extend task_config with local_dict self.task_config = AttrDict(**self.task_config, **local_dict) + # Create JEDI object dictionary + expected_keys = ['snowanlvar'] + self.jedi_dict = Jedi.get_jedi_dict(self.task_config.JEDI_CONFIG_YAML, self.task_config, expected_keys) + + @logit(logger) + def initialize(self) -> None: + """Initialize a global snow analysis + + This method will initialize a global snow analysis. + This includes: + - initialize JEDI application + - staging model backgrounds + - staging observation files + - staging FV3-JEDI fix files + - staging B error files + - creating output directories + + Parameters + ---------- + None + + Returns + ---------- + None + """ + # initialize JEDI variational application + logger.info(f"Initializing JEDI variational DA application") + self.jedi_dict['snowanlvar'].initialize(self.task_config) + + # stage backgrounds + logger.info(f"Staging background files from {self.task_config.VAR_BKG_STAGING_YAML}") + bkg_staging_dict = parse_j2yaml(self.task_config.VAR_BKG_STAGING_YAML, self.task_config) + FileHandler(bkg_staging_dict).sync() + logger.debug(f"Background files:\n{pformat(bkg_staging_dict)}") + + # stage observations + logger.info(f"Staging list of observation files generated from JEDI config") + obs_dict = self.jedi_dict['snowanlvar'].render_jcb(self.task_config, 'snow_obs_staging') + FileHandler(obs_dict).sync() + logger.debug(f"Observation files:\n{pformat(obs_dict)}") + + # stage GTS bufr2ioda mapping YAML files + logger.info(f"Staging GTS bufr2ioda mapping YAML files from {self.task_config.GTS_SNOW_STAGE_YAML}") + gts_mapping_list = parse_j2yaml(self.task_config.GTS_SNOW_STAGE_YAML, self.task_config) + FileHandler(gts_mapping_list).sync() + + # stage FV3-JEDI fix files + logger.info(f"Staging JEDI fix files from {self.task_config.JEDI_FIX_YAML}") + jedi_fix_dict = parse_j2yaml(self.task_config.JEDI_FIX_YAML, self.task_config) + FileHandler(jedi_fix_dict).sync() + logger.debug(f"JEDI fix files:\n{pformat(jedi_fix_dict)}") + + # staging B error files + logger.info("Stage files for static background error") + berror_staging_dict = parse_j2yaml(self.task_config.BERROR_STAGING_YAML, self.task_config) + FileHandler(berror_staging_dict).sync() + 
logger.debug(f"Background error files:\n{pformat(berror_staging_dict)}") + + # need output dir for diags and anl + logger.debug("Create empty output [anl, diags] directories to receive output from executable") + newdirs = [ + os.path.join(self.task_config.DATA, 'anl'), + os.path.join(self.task_config.DATA, 'diags'), + ] + FileHandler({'mkdir': newdirs}).sync() + @logit(logger) def prepare_IMS(self) -> None: """Prepare the IMS data for a global snow analysis @@ -75,21 +164,19 @@ def prepare_IMS(self) -> None: # create a temporary dict of all keys needed in this method localconf = AttrDict() - keys = ['DATA', 'current_cycle', 'COM_OBS', 'COM_ATMOS_RESTART_PREV', + keys = ['DATA', 'current_cycle', 'COMIN_OBS', 'COMIN_ATMOS_RESTART_PREV', 'OPREFIX', 'CASE', 'OCNRES', 'ntiles', 'FIXgfs'] for key in keys: localconf[key] = self.task_config[key] - # stage backgrounds - logger.info("Staging backgrounds") - FileHandler(self.get_bkg_dict(localconf)).sync() + localconf['ims_fcst_path'] = self.task_config['snow_bkg_path'] # Read and render the IMS_OBS_LIST yaml logger.info(f"Reading {self.task_config.IMS_OBS_LIST}") prep_ims_config = parse_j2yaml(self.task_config.IMS_OBS_LIST, localconf) logger.debug(f"{self.task_config.IMS_OBS_LIST}:\n{pformat(prep_ims_config)}") - # copy the IMS obs files from COM_OBS to DATA/obs + # copy the IMS obs files from COMIN_OBS to DATA/obs logger.info("Copying IMS obs for CALCFIMSEXE") FileHandler(prep_ims_config.calcfims).sync() @@ -116,9 +203,11 @@ def prepare_IMS(self) -> None: try: exe() except OSError: - raise OSError(f"Failed to execute {exe}") - except Exception: - raise WorkflowException(f"An error occured during execution of {exe}") + logger.exception(f"Failed to execute {exe}") + raise + except Exception as err: + logger.exception(f"An error occurred during execution of {exe}") + raise WorkflowException(f"An error occurred during execution of {exe}") from err # Ensure the snow depth IMS file is produced by the above executable input_file = f"IMSscf.{to_YMD(localconf.current_cycle)}.{localconf.CASE}_oro_data.nc" @@ -140,121 +229,38 @@ def prepare_IMS(self) -> None: logger.debug(f"Executing {exe}") exe() except OSError: - raise OSError(f"Failed to execute {exe}") - except Exception: - raise WorkflowException(f"An error occured during execution of {exe}") + logger.exception(f"Failed to execute {exe}") + raise + except Exception as err: + logger.exception(f"An error occurred during execution of {exe}") + raise WorkflowException(f"An error occurred during execution of {exe}") from err # Ensure the IODA snow depth IMS file is produced by the IODA converter - # If so, copy to COM_OBS/ + # If so, copy to DATA/obs/ if not os.path.isfile(f"{os.path.join(localconf.DATA, output_file)}"): logger.exception(f"{self.task_config.IMS2IODACONV} failed to produce {output_file}") raise FileNotFoundError(f"{os.path.join(localconf.DATA, output_file)}") else: - logger.info(f"Copy {output_file} to {self.task_config.COM_OBS}") + logger.info(f"Copy {output_file} to {os.path.join(localconf.DATA, 'obs')}") FileHandler(prep_ims_config.ims2ioda).sync() @logit(logger) - def initialize(self) -> None: - """Initialize method for snow analysis - This method: - - creates artifacts in the DATA directory by copying fix files - - creates the JEDI LETKF yaml from the template - - stages backgrounds, observations and ensemble members + def execute(self, jedi_dict_key: str) -> None: + """Run JEDI executable + + This method will run JEDI executables for the global snow analysis Parameters ---------- - self :
Analysis - Instance of the SnowAnalysis object - """ - - super().initialize() - - # create a temporary dict of all keys needed in this method - localconf = AttrDict() - keys = ['PARMgfs', 'DATA', 'current_cycle', 'COM_OBS', 'COM_ATMOS_RESTART_PREV', - 'OPREFIX', 'CASE', 'OCNRES', 'ntiles'] - for key in keys: - localconf[key] = self.task_config[key] - - # Make member directories in DATA for background - dirlist = [] - for imem in range(1, SnowAnalysis.NMEM_SNOWENS + 1): - dirlist.append(os.path.join(localconf.DATA, 'bkg', f'mem{imem:03d}')) - FileHandler({'mkdir': dirlist}).sync() - - # stage fix files - logger.info(f"Staging JEDI fix files from {self.task_config.JEDI_FIX_YAML}") - jedi_fix_list = parse_j2yaml(self.task_config.JEDI_FIX_YAML, self.task_config) - FileHandler(jedi_fix_list).sync() - - # stage backgrounds - logger.info("Staging ensemble backgrounds") - FileHandler(self.get_ens_bkg_dict(localconf)).sync() - - # stage GTS bufr2ioda mapping YAML files - logger.info(f"Staging GTS bufr2ioda mapping YAML files from {self.task_config.GTS_SNOW_STAGE_YAML}") - gts_mapping_list = parse_j2yaml(self.task_config.GTS_SNOW_STAGE_YAML, localconf) - FileHandler(gts_mapping_list).sync() - - # Write out letkfoi YAML file - save_as_yaml(self.task_config.jedi_config, self.task_config.jedi_yaml) - logger.info(f"Wrote letkfoi YAML to: {self.task_config.jedi_yaml}") - - # need output dir for diags and anl - logger.info("Create empty output [anl, diags] directories to receive output from executable") - newdirs = [ - os.path.join(localconf.DATA, "anl"), - os.path.join(localconf.DATA, "diags"), - ] - FileHandler({'mkdir': newdirs}).sync() + jedi_dict_key + key specifying particular Jedi object in self.jedi_dict - @logit(logger) - def execute(self) -> None: - """Run a series of tasks to create Snow analysis - This method: - - creates an 2 member ensemble - - runs the JEDI LETKF executable to produce increments - - creates analysis from increments - - Parameters + Returns ---------- - self : Analysis - Instance of the SnowAnalysis object + None """ - # create a temporary dict of all keys needed in this method - localconf = AttrDict() - keys = ['HOMEgfs', 'DATA', 'current_cycle', - 'COM_ATMOS_RESTART_PREV', 'COM_SNOW_ANALYSIS', 'APREFIX', - 'SNOWDEPTHVAR', 'BESTDDEV', 'CASE', 'OCNRES', 'ntiles', - 'APRUN_SNOWANL', 'JEDIEXE', 'jedi_yaml', 'DOIAU', 'SNOW_WINDOW_BEGIN', - 'APPLY_INCR_NML_TMPL', 'APPLY_INCR_EXE', 'APRUN_APPLY_INCR'] - for key in keys: - localconf[key] = self.task_config[key] - - logger.info("Creating ensemble") - self.create_ensemble(localconf.SNOWDEPTHVAR, - localconf.BESTDDEV, - AttrDict({key: localconf[key] for key in ['DATA', 'ntiles', 'current_cycle']})) - - logger.info("Running JEDI LETKF") - exec_cmd = Executable(localconf.APRUN_SNOWANL) - exec_name = os.path.join(localconf.DATA, 'gdas.x') - exec_cmd.add_default_arg(exec_name) - exec_cmd.add_default_arg('fv3jedi') - exec_cmd.add_default_arg('localensembleda') - exec_cmd.add_default_arg(localconf.jedi_yaml) - - try: - logger.debug(f"Executing {exec_cmd}") - exec_cmd() - except OSError: - raise OSError(f"Failed to execute {exec_cmd}") - except Exception: - raise WorkflowException(f"An error occured during execution of {exec_cmd}") - - logger.info("Creating analysis from backgrounds and increments") - self.add_increments(localconf) + self.jedi_dict[jedi_dict_key].execute() @logit(logger) def finalize(self) -> None: @@ -271,18 +277,41 @@ def finalize(self) -> None: Instance of the SnowAnalysis object """ - logger.info("Create diagnostic 
tarball of diag*.nc4 files") - statfile = os.path.join(self.task_config.COM_SNOW_ANALYSIS, f"{self.task_config.APREFIX}snowstat.tgz") - self.tgz_diags(statfile, self.task_config.DATA) - - logger.info("Copy full YAML to COM") - src = os.path.join(self.task_config['DATA'], f"{self.task_config.APREFIX}letkfoi.yaml") - dest = os.path.join(self.task_config.COM_CONF, f"{self.task_config.APREFIX}letkfoi.yaml") - yaml_copy = { - 'mkdir': [self.task_config.COM_CONF], - 'copy': [[src, dest]] - } - FileHandler(yaml_copy).sync() + # ---- tar up diags + # path of output tar statfile + snowstat = os.path.join(self.task_config.COMOUT_SNOW_ANALYSIS, f"{self.task_config.APREFIX}snowstat.tgz") + + # get list of diag files to put in tarball + diags = glob.glob(os.path.join(self.task_config.DATA, 'diags', 'diag*nc')) + + logger.info(f"Compressing {len(diags)} diag files to {snowstat}") + + # gzip the files first + logger.debug(f"Gzipping {len(diags)} diag files") + for diagfile in diags: + with open(diagfile, 'rb') as f_in, gzip.open(f"{diagfile}.gz", 'wb') as f_out: + f_out.writelines(f_in) + + # open tar file for writing + logger.debug(f"Creating tar file {snowstat} with {len(diags)} gzipped diag files") + with tarfile.open(snowstat, "w|gz") as archive: + for diagfile in diags: + diaggzip = f"{diagfile}.gz" + archive.add(diaggzip, arcname=os.path.basename(diaggzip)) + + # get list of yamls to copy to ROTDIR + yamls = glob.glob(os.path.join(self.task_config.DATA, '*snow*yaml')) + + # copy full YAML from executable to ROTDIR + for src in yamls: + yaml_base = os.path.splitext(os.path.basename(src))[0] + dest_yaml_name = f"{self.task_config.RUN}.t{self.task_config.cyc:02d}z.{yaml_base}.yaml" + dest = os.path.join(self.task_config.COMOUT_CONF, dest_yaml_name) + logger.debug(f"Copying {src} to {dest}") + yaml_copy = { + 'copy': [[src, dest]] + } + FileHandler(yaml_copy).sync() logger.info("Copy analysis to COM") bkgtimes = [] @@ -296,7 +325,7 @@ def finalize(self) -> None: for itile in range(1, self.task_config.ntiles + 1): filename = template.format(tilenum=itile) src = os.path.join(self.task_config.DATA, 'anl', filename) - dest = os.path.join(self.task_config.COM_SNOW_ANALYSIS, filename) + dest = os.path.join(self.task_config.COMOUT_SNOW_ANALYSIS, filename) anllist.append([src, dest]) FileHandler({'copy': anllist}).sync() @@ -306,214 +335,47 @@ def finalize(self) -> None: for itile in range(1, self.task_config.ntiles + 1): filename = template.format(tilenum=itile) src = os.path.join(self.task_config.DATA, 'anl', filename) - dest = os.path.join(self.task_config.COM_SNOW_ANALYSIS, filename) + dest = os.path.join(self.task_config.COMOUT_SNOW_ANALYSIS, filename) inclist.append([src, dest]) FileHandler({'copy': inclist}).sync() - @staticmethod - @logit(logger) - def get_bkg_dict(config: Dict) -> Dict[str, List[str]]: - """Compile a dictionary of model background files to copy - - This method constructs a dictionary of FV3 RESTART files (coupler, sfc_data) - that are needed for global snow DA and returns said dictionary for use by the FileHandler class. 
- - Parameters - ---------- - config: Dict - Dictionary of key-value pairs needed in this method - Should contain the following keys: - COM_ATMOS_RESTART_PREV - DATA - current_cycle - ntiles - - Returns - ---------- - bkg_dict: Dict - a dictionary containing the list of model background files to copy for FileHandler - """ - # NOTE for now this is FV3 RESTART files and just assumed to be fh006 - - # get FV3 sfc_data RESTART files, this will be a lot simpler when using history files - rst_dir = os.path.join(config.COM_ATMOS_RESTART_PREV) # for now, option later? - run_dir = os.path.join(config.DATA, 'bkg') - - # Start accumulating list of background files to copy - bkglist = [] - - # snow DA needs coupler - basename = f'{to_fv3time(config.current_cycle)}.coupler.res' - bkglist.append([os.path.join(rst_dir, basename), os.path.join(run_dir, basename)]) - - # snow DA only needs sfc_data - for ftype in ['sfc_data']: - template = f'{to_fv3time(config.current_cycle)}.{ftype}.tile{{tilenum}}.nc' - for itile in range(1, config.ntiles + 1): - basename = template.format(tilenum=itile) - bkglist.append([os.path.join(rst_dir, basename), os.path.join(run_dir, basename)]) - - bkg_dict = { - 'mkdir': [run_dir], - 'copy': bkglist - } - return bkg_dict - - @staticmethod - @logit(logger) - def get_ens_bkg_dict(config: Dict) -> Dict: - """Compile a dictionary of model background files to copy for the ensemble - Note that a "Fake" 2-member ensemble backgroud is being created by copying FV3 RESTART files (coupler, sfc_data) - from the deterministic background to DATA/bkg/mem001, 002. - - Parameters - ---------- - config: Dict - Dictionary of key-value pairs needed in this method - Should contain the following keys: - COM_ATMOS_RESTART_PREV - DATA - current_cycle - ntiles - - Returns - ---------- - bkg_dict: Dict - a dictionary containing the list of model background files to copy for FileHandler - """ - - dirlist = [] - bkglist = [] - - # get FV3 sfc_data RESTART files; Note an ensemble is being created - rst_dir = os.path.join(config.COM_ATMOS_RESTART_PREV) - - for imem in range(1, SnowAnalysis.NMEM_SNOWENS + 1): - memchar = f"mem{imem:03d}" - - run_dir = os.path.join(config.DATA, 'bkg', memchar, 'RESTART') - dirlist.append(run_dir) - - # Snow DA needs coupler - basename = f'{to_fv3time(config.current_cycle)}.coupler.res' - bkglist.append([os.path.join(rst_dir, basename), os.path.join(run_dir, basename)]) - - # Snow DA only needs sfc_data - for ftype in ['sfc_data']: - template = f'{to_fv3time(config.current_cycle)}.{ftype}.tile{{tilenum}}.nc' - for itile in range(1, config.ntiles + 1): - basename = template.format(tilenum=itile) - bkglist.append([os.path.join(rst_dir, basename), os.path.join(run_dir, basename)]) - - bkg_dict = { - 'mkdir': dirlist, - 'copy': bkglist - } - - return bkg_dict - - @staticmethod - @logit(logger) - def create_ensemble(vname: str, bestddev: float, config: Dict) -> None: - """Create a 2-member ensemble for Snow Depth analysis by perturbing snow depth with a prescribed variance. - Additionally, remove glacier locations - - Parameters - ---------- - vname : str - snow depth variable to perturb: "snodl" - bestddev : float - Background Error Standard Deviation to perturb around to create ensemble - config: Dict - Dictionary of key-value pairs needed in this method. 
It must contain the following keys: - DATA - current_cycle - ntiles - """ - - # 2 ens members - offset = bestddev / np.sqrt(SnowAnalysis.NMEM_SNOWENS) - - logger.info(f"Creating ensemble for LETKFOI by offsetting with {offset}") - - workdir = os.path.join(config.DATA, 'bkg') - - sign = [1, -1] - ens_dirs = ['mem001', 'mem002'] - - for (memchar, value) in zip(ens_dirs, sign): - logger.debug(f"creating ensemble member {memchar} with sign {value}") - for tt in range(1, config.ntiles + 1): - logger.debug(f"perturbing tile {tt}") - # open file - out_netcdf = os.path.join(workdir, memchar, 'RESTART', f"{to_fv3time(config.current_cycle)}.sfc_data.tile{tt}.nc") - logger.debug(f"creating member {out_netcdf}") - with Dataset(out_netcdf, "r+") as ncOut: - slmsk_array = ncOut.variables['slmsk'][:] - vtype_array = ncOut.variables['vtype'][:] - slmsk_array[vtype_array == 15] = 0 # remove glacier locations - var_array = ncOut.variables[vname][:] - var_array[slmsk_array == 1] = var_array[slmsk_array == 1] + value * offset - ncOut.variables[vname][0, :, :] = var_array[:] - - @staticmethod @logit(logger) - def add_increments(config: Dict) -> None: + def add_increments(self) -> None: """Executes the program "apply_incr.exe" to create analysis "sfc_data" files by adding increments to backgrounds Parameters ---------- - config: Dict - Dictionary of key-value pairs needed in this method - Should contain the following keys: - HOMEgfs - COM_ATMOS_RESTART_PREV - DATA - current_cycle - CASE - OCNRES - ntiles - APPLY_INCR_NML_TMPL - APPLY_INCR_EXE - APRUN_APPLY_INCR - DOIAU - SNOW_WINDOW_BEGIN - - Raises - ------ - OSError - Failure due to OS issues - WorkflowException - All other exceptions + self : Analysis + Instance of the SnowAnalysis object """ # need backgrounds to create analysis from increments after LETKF logger.info("Copy backgrounds into anl/ directory for creating analysis from increments") bkgtimes = [] - if config.DOIAU: + if self.task_config.DOIAU: # want analysis at beginning and middle of window - bkgtimes.append(config.SNOW_WINDOW_BEGIN) - bkgtimes.append(config.current_cycle) + bkgtimes.append(self.task_config.SNOW_WINDOW_BEGIN) + bkgtimes.append(self.task_config.current_cycle) anllist = [] for bkgtime in bkgtimes: template = f'{to_fv3time(bkgtime)}.sfc_data.tile{{tilenum}}.nc' - for itile in range(1, config.ntiles + 1): + for itile in range(1, self.task_config.ntiles + 1): filename = template.format(tilenum=itile) - src = os.path.join(config.COM_ATMOS_RESTART_PREV, filename) - dest = os.path.join(config.DATA, "anl", filename) + src = os.path.join(self.task_config.COMIN_ATMOS_RESTART_PREV, filename) + dest = os.path.join(self.task_config.DATA, "anl", filename) anllist.append([src, dest]) FileHandler({'copy': anllist}).sync() - if config.DOIAU: + if self.task_config.DOIAU: logger.info("Copying increments to beginning of window") - template_in = f'snowinc.{to_fv3time(config.current_cycle)}.sfc_data.tile{{tilenum}}.nc' - template_out = f'snowinc.{to_fv3time(config.SNOW_WINDOW_BEGIN)}.sfc_data.tile{{tilenum}}.nc' + template_in = f'snowinc.{to_fv3time(self.task_config.current_cycle)}.sfc_data.tile{{tilenum}}.nc' + template_out = f'snowinc.{to_fv3time(self.task_config.SNOW_WINDOW_BEGIN)}.sfc_data.tile{{tilenum}}.nc' inclist = [] - for itile in range(1, config.ntiles + 1): + for itile in range(1, self.task_config.ntiles + 1): filename_in = template_in.format(tilenum=itile) filename_out = template_out.format(tilenum=itile) - src = os.path.join(config.DATA, 'anl', filename_in) - dest = 
os.path.join(config.DATA, 'anl', filename_out) + src = os.path.join(self.task_config.DATA, 'anl', filename_in) + dest = os.path.join(self.task_config.DATA, 'anl', filename_out) inclist.append([src, dest]) FileHandler({'copy': inclist}).sync() @@ -521,35 +383,37 @@ def add_increments(config: Dict) -> None: for bkgtime in bkgtimes: logger.info(f"Processing analysis valid: {bkgtime}") logger.info("Create namelist for APPLY_INCR_EXE") - nml_template = config.APPLY_INCR_NML_TMPL + nml_template = self.task_config.APPLY_INCR_NML_TMPL nml_config = { 'current_cycle': bkgtime, - 'CASE': config.CASE, - 'DATA': config.DATA, - 'HOMEgfs': config.HOMEgfs, - 'OCNRES': config.OCNRES, + 'CASE': self.task_config.CASE, + 'DATA': self.task_config.DATA, + 'HOMEgfs': self.task_config.HOMEgfs, + 'OCNRES': self.task_config.OCNRES, } nml_data = Jinja(nml_template, nml_config).render logger.debug(f"apply_incr_nml:\n{nml_data}") - nml_file = os.path.join(config.DATA, "apply_incr_nml") + nml_file = os.path.join(self.task_config.DATA, "apply_incr_nml") with open(nml_file, "w") as fho: fho.write(nml_data) logger.info("Link APPLY_INCR_EXE into DATA/") - exe_src = config.APPLY_INCR_EXE - exe_dest = os.path.join(config.DATA, os.path.basename(exe_src)) + exe_src = self.task_config.APPLY_INCR_EXE + exe_dest = os.path.join(self.task_config.DATA, os.path.basename(exe_src)) if os.path.exists(exe_dest): rm_p(exe_dest) os.symlink(exe_src, exe_dest) # execute APPLY_INCR_EXE to create analysis files - exe = Executable(config.APRUN_APPLY_INCR) - exe.add_default_arg(os.path.join(config.DATA, os.path.basename(exe_src))) + exe = Executable(self.task_config.APRUN_APPLY_INCR) + exe.add_default_arg(os.path.join(self.task_config.DATA, os.path.basename(exe_src))) logger.info(f"Executing {exe}") try: exe() except OSError: - raise OSError(f"Failed to execute {exe}") - except Exception: - raise WorkflowException(f"An error occured during execution of {exe}") + logger.exception(f"Failed to execute {exe}") + raise + except Exception as err: + logger.exception(f"An error occurred during execution of {exe}") + raise WorkflowException(f"An error occurred during execution of {exe}") from err diff --git a/ush/python/pygfs/task/snowens_analysis.py b/ush/python/pygfs/task/snowens_analysis.py index 982f74130c..18073af6b9 100644 --- a/ush/python/pygfs/task/snowens_analysis.py +++ b/ush/python/pygfs/task/snowens_analysis.py @@ -2,283 +2,371 @@ import os from logging import getLogger -from typing import Dict, List, Any -import netCDF4 as nc +from typing import Dict, List, Optional, Any +from pprint import pformat +import glob +import gzip +import tarfile import numpy as np +from netCDF4 import Dataset from wxflow import (AttrDict, FileHandler, - to_fv3time, to_timedelta, add_to_datetime, - rm_p, chdir, + to_fv3time, to_YMD, to_YMDH, to_timedelta, add_to_datetime, + rm_p, cp, parse_j2yaml, save_as_yaml, Jinja, + Task, logit, Executable, WorkflowException) -from pygfs.task.analysis import Analysis +from pygfs.jedi import Jedi logger = getLogger(__name__.split('.')[-1]) -class SnowEnsAnalysis(Analysis): +class SnowEnsAnalysis(Task): """ - Class for global ensemble snow analysis tasks + Class for JEDI-based global snow ensemble analysis tasks """ @logit(logger, name="SnowEnsAnalysis") - def __init__(self, config): + def __init__(self, config: Dict[str, Any]): + """Constructor for the global snow ensemble analysis task + + This method will construct a global snow ensemble analysis task.
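+
+        A hedged usage sketch, mirroring the driver script
+        exglobal_snowens_analysis.py above (application keys come from
+        esnowanl_jedi_config.yaml.j2; the 00z IMS prep step is omitted):
+
+            anl = SnowEnsAnalysis(config)
+            anl.initialize()
+            anl.execute('esnowanlensmean')   # ensemble mean of the snow forecast
+            anl.execute('snowanlvar')        # 2DVar analysis about the ensemble mean
+            anl.add_increments()
+            anl.finalize()
+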
+ This includes: + - extending the task_config attribute AttrDict to include parameters required for this task + - instantiate the Jedi attribute object + + Parameters + ---------- + config: Dict + dictionary object containing task configuration + + Returns + ---------- + None + """ super().__init__(config) - _res_det = int(self.task_config['CASE'][1:]) - _res_ens = int(self.task_config['CASE_ENS'][1:]) + _res = int(self.task_config['CASE_ENS'][1:]) + self.task_config['CASE'] = self.task_config['CASE_ENS'] _window_begin = add_to_datetime(self.task_config.current_cycle, -to_timedelta(f"{self.task_config['assim_freq']}H") / 2) - _recenter_yaml = os.path.join(self.task_config.DATA, f"{self.task_config.RUN}.t{self.task_config['cyc']:02d}z.land_recenter.yaml") + + # fix ocnres + self.task_config.OCNRES = f"{self.task_config.OCNRES :03d}" # Create a local dictionary that is repeatedly used across this class local_dict = AttrDict( { - 'npx_ges': _res_ens + 1, - 'npy_ges': _res_ens + 1, + 'npx_ges': _res + 1, + 'npy_ges': _res + 1, 'npz_ges': self.task_config.LEVS - 1, 'npz': self.task_config.LEVS - 1, 'SNOW_WINDOW_BEGIN': _window_begin, 'SNOW_WINDOW_LENGTH': f"PT{self.task_config['assim_freq']}H", - 'ATM_WINDOW_BEGIN': _window_begin, - 'ATM_WINDOW_LENGTH': f"PT{self.task_config['assim_freq']}H", - 'OPREFIX': f"{self.task_config.RUN}.t{self.task_config.cyc:02d}z.", + 'OPREFIX': f"{self.task_config.CDUMP}.t{self.task_config.cyc:02d}z.", 'APREFIX': f"{self.task_config.RUN}.t{self.task_config.cyc:02d}z.", - 'jedi_yaml': _recenter_yaml, + 'GPREFIX': f"gdas.t{self.task_config.previous_cycle.hour:02d}z.", + 'snow_obsdatain_path': os.path.join(self.task_config.DATA, 'obs'), + 'snow_obsdataout_path': os.path.join(self.task_config.DATA, 'diags'), + 'snow_bkg_path': os.path.join('.', 'bkg', 'ensmean/'), } ) - bkg_time = _window_begin if self.task_config.DOIAU else self.task_config.current_cycle - local_dict['bkg_time'] = bkg_time - # task_config is everything that this task should need + # Extend task_config with local_dict self.task_config = AttrDict(**self.task_config, **local_dict) + # Create JEDI object dictionary + expected_keys = ['snowanlvar', 'esnowanlensmean'] + self.jedi_dict = Jedi.get_jedi_dict(self.task_config.JEDI_CONFIG_YAML, self.task_config, expected_keys) + @logit(logger) def initialize(self) -> None: - """Initialize method for snow ensemble analysis - This method: + """Initialize a global snow ensemble analysis + This method will initialize a global snow ensemble analysis. 
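+
+        Each staging step below follows one pattern: parse_j2yaml renders a
+        Jinja2 template into a dict of actions that FileHandler executes.
+        A hedged sketch, using snow_berror.yaml.j2 above as the template:
+
+            staging = parse_j2yaml(self.task_config.BERROR_STAGING_YAML, self.task_config)
+            # staging resembles {'mkdir': [...], 'copy': [[src, dest], ...]}
+            FileHandler(staging).sync()   # make the directories, then copy the files
+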
+ This includes: + - initialize JEDI applications + - staging model backgrounds + - staging observation files + - staging FV3-JEDI fix files + - staging B error files + - creating output directories Parameters ---------- - self : Analysis - Instance of the SnowEnsAnalysis object - """ - - super().initialize() + None - # stage background and increment files - logger.info(f"Staging files from {self.task_config.SNOW_ENS_STAGE_TMPL}") - snow_stage_list = parse_j2yaml(self.task_config.SNOW_ENS_STAGE_TMPL, self.task_config) - FileHandler(snow_stage_list).sync() - - # stage orography files - logger.info(f"Staging orography files specified in {self.task_config.SNOW_OROG_STAGE_TMPL}") - snow_orog_stage_list = parse_j2yaml(self.task_config.SNOW_OROG_STAGE_TMPL, self.task_config) - FileHandler(snow_orog_stage_list).sync() - - # stage fix files for fv3-jedi + Returns + ---------- + None + """ + # initialize JEDI variational application + logger.info(f"Initializing JEDI variational DA application") + self.jedi_dict['snowanlvar'].initialize(self.task_config) + + # initialize ensemble mean computation + logger.info(f"Initializing JEDI ensemble mean application") + self.jedi_dict['esnowanlensmean'].initialize(self.task_config) + + # stage backgrounds + logger.info(f"Staging background files from {self.task_config.SNOW_ENS_STAGE_TMPL}") + bkg_staging_dict = parse_j2yaml(self.task_config.SNOW_ENS_STAGE_TMPL, self.task_config) + FileHandler(bkg_staging_dict).sync() + logger.debug(f"Background files:\n{pformat(bkg_staging_dict)}") + + # stage orography + logger.info(f"Staging orography files from {self.task_config.SNOW_OROG_STAGE_TMPL}") + orog_staging_dict = parse_j2yaml(self.task_config.SNOW_OROG_STAGE_TMPL, self.task_config) + FileHandler(orog_staging_dict).sync() + logger.debug(f"Orography files:\n{pformat(orog_staging_dict)}") + # note JEDI will try to read the orog files for each member, let's just symlink + logger.info("Linking orography files for each member") + oro_files = glob.glob(os.path.join(self.task_config.DATA, 'orog', 'ens', '*')) + for mem in range(1, self.task_config.NMEM_ENS + 1): + dest = os.path.join(self.task_config.DATA, 'bkg', f"mem{mem:03}") + for oro_file in oro_files: + os.symlink(oro_file, os.path.join(dest, os.path.basename(oro_file))) + # need to symlink orography files for the ensmean too + dest = os.path.join(self.task_config.DATA, 'bkg', 'ensmean') + for oro_file in oro_files: + os.symlink(oro_file, os.path.join(dest, os.path.basename(oro_file))) + + # stage observations + logger.info(f"Staging list of observation files generated from JEDI config") + obs_dict = self.jedi_dict['snowanlvar'].render_jcb(self.task_config, 'snow_obs_staging') + FileHandler(obs_dict).sync() + logger.debug(f"Observation files:\n{pformat(obs_dict)}") + + # stage GTS bufr2ioda mapping YAML files + logger.info(f"Staging GTS bufr2ioda mapping YAML files from {self.task_config.GTS_SNOW_STAGE_YAML}") + gts_mapping_list = parse_j2yaml(self.task_config.GTS_SNOW_STAGE_YAML, self.task_config) + FileHandler(gts_mapping_list).sync() + + # stage FV3-JEDI fix files logger.info(f"Staging JEDI fix files from {self.task_config.JEDI_FIX_YAML}") - jedi_fix_list = parse_j2yaml(self.task_config.JEDI_FIX_YAML, self.task_config) - FileHandler(jedi_fix_list).sync() - - # write land ensemble recentering YAML - save_as_yaml(self.task_config.jedi_config, self.task_config.jedi_yaml) - logger.info(f"Wrote recentering YAML to: {self.task_config.jedi_yaml}") - - # link recentering executable - # placeholder, currently already 
done by the analysis parent class - - # copy fregrid executable - fregrid_copy = {'copy': [[os.path.join(self.task_config.EXECgfs, 'fregrid'), os.path.join(self.task_config.DATA, 'fregrid.x')]]} - FileHandler(fregrid_copy).sync() + jedi_fix_dict = parse_j2yaml(self.task_config.JEDI_FIX_YAML, self.task_config) + FileHandler(jedi_fix_dict).sync() + logger.debug(f"JEDI fix files:\n{pformat(jedi_fix_dict)}") + + # staging B error files + logger.info("Stage files for static background error") + berror_staging_dict = parse_j2yaml(self.task_config.BERROR_STAGING_YAML, self.task_config) + FileHandler(berror_staging_dict).sync() + logger.debug(f"Background error files:\n{pformat(berror_staging_dict)}") + + # need output dir for diags and anl + logger.debug("Create empty output [anl, diags] directories to receive output from executable") + newdirs = [ + os.path.join(self.task_config.DATA, 'anl'), + os.path.join(self.task_config.DATA, 'diags'), + ] + FileHandler({'mkdir': newdirs}).sync() @logit(logger) - def genWeights(self) -> None: - """Create a modified land_frac file for use by fregrid - to interpolate the snow background from det to ensres + def prepare_IMS(self) -> None: + """Prepare the IMS data for a global snow analysis + + This method will prepare IMS data for a global snow analysis using JEDI. + This includes: + - staging model backgrounds + - processing raw IMS observation data and prepare for conversion to IODA + - creating IMS snowdepth data in IODA format. Parameters ---------- - self : Analysis - Instance of the SnowEnsAnalysis object - """ - - chdir(self.task_config.DATA) - - # loop through tiles - for tile in range(1, self.task_config.ntiles + 1): - # open the restart and get the vegetation type - rst = nc.Dataset(f"./bkg/det/{to_fv3time(self.task_config.bkg_time)}.sfc_data.tile{tile}.nc") - vtype = rst.variables['vtype'][:] - rst.close() - # open the oro data and get the land fraction - oro = nc.Dataset(f"./orog/det/{self.task_config.CASE}.mx{self.task_config.OCNRES}_oro_data.tile{tile}.nc") - land_frac = oro.variables['land_frac'][:] - oro.close() - # create an output file - ncfile = nc.Dataset(f"./orog/det/{self.task_config.CASE}.mx{self.task_config.OCNRES}_interp_weight.tile{tile}.nc", mode='w', format='NETCDF4') - case_int = int(self.task_config.CASE[1:]) - lon = ncfile.createDimension('lon', case_int) - lat = ncfile.createDimension('lat', case_int) - lsm_frac_out = ncfile.createVariable('lsm_frac', np.float32, ('lon', 'lat')) - # set the land fraction to 0 on glaciers to not interpolate that snow - glacier = 15 - land_frac[np.where(vtype[0, ...] 
== glacier)] = 0 - lsm_frac_out[:] = land_frac - # write out and close the file - ncfile.close() - - @logit(logger) - def genMask(self) -> None: - """Create a mask for use by JEDI - to mask out snow increments on non-LSM gridpoints + Analysis: parent class for GDAS task - Parameters + Returns ---------- - self : Analysis - Instance of the SnowEnsAnalysis object + None """ - chdir(self.task_config.DATA) - - # loop through tiles - for tile in range(1, self.task_config.ntiles + 1): - # open the restart and get the vegetation type - rst = nc.Dataset(f"./bkg/mem001/{to_fv3time(self.task_config.bkg_time)}.sfc_data.tile{tile}.nc", mode="r+") - vtype = rst.variables['vtype'][:] - slmsk = rst.variables['slmsk'][:] - # slmsk(Time, yaxis_1, xaxis_1) - # set the mask to 3 on glaciers - glacier = 15 - slmsk[np.where(vtype == glacier)] = 3 - # write out and close the file - rst.variables['slmsk'][:] = slmsk - rst.close() - - @logit(logger) - def regridDetBkg(self) -> None: - """Run fregrid to regrid the deterministic snow background - to the ensemble resolution + # create a temporary dict of all keys needed in this method + localconf = AttrDict() + keys = ['DATA', 'current_cycle', 'COMIN_OBS', + 'OPREFIX', 'CASE', 'OCNRES', 'ntiles', 'FIXgfs'] + for key in keys: + localconf[key] = self.task_config[key] + + localconf['ims_fcst_path'] = self.task_config['snow_bkg_path'] + # Read and render the IMS_OBS_LIST yaml + logger.info(f"Reading {self.task_config.IMS_OBS_LIST}") + prep_ims_config = parse_j2yaml(self.task_config.IMS_OBS_LIST, localconf) + logger.debug(f"{self.task_config.IMS_OBS_LIST}:\n{pformat(prep_ims_config)}") + + # copy the IMS obs files from COMIN_OBS to DATA/obs + logger.info("Copying IMS obs for CALCFIMSEXE") + FileHandler(prep_ims_config.calcfims).sync() + + logger.info("Create namelist for CALCFIMSEXE") + nml_template = self.task_config.FIMS_NML_TMPL + nml_data = Jinja(nml_template, localconf).render + logger.debug(f"fims.nml:\n{nml_data}") + + nml_file = os.path.join(localconf.DATA, "fims.nml") + with open(nml_file, "w") as fho: + fho.write(nml_data) - Parameters - ---------- - self : Analysis - Instance of the SnowEnsAnalysis object - """ + logger.info("Link CALCFIMSEXE into DATA/") + exe_src = self.task_config.CALCFIMSEXE + exe_dest = os.path.join(localconf.DATA, os.path.basename(exe_src)) + if os.path.exists(exe_dest): + rm_p(exe_dest) + os.symlink(exe_src, exe_dest) - chdir(self.task_config.DATA) - - arg_list = [ - "--input_mosaic", f"./orog/det/{self.task_config.CASE}_mosaic.nc", - "--input_dir", f"./bkg/det/", - "--input_file", f"{to_fv3time(self.task_config.bkg_time)}.sfc_data", - "--scalar_field", f"snodl", - "--output_dir", f"./bkg/det_ensres/", - "--output_file", f"{to_fv3time(self.task_config.bkg_time)}.sfc_data", - "--output_mosaic", f"./orog/ens/{self.task_config.CASE_ENS}_mosaic.nc", - "--interp_method", f"conserve_order1", - "--weight_file", f"./orog/det/{self.task_config.CASE}.mx{self.task_config.OCNRES}_interp_weight", - "--weight_field", f"lsm_frac", - "--remap_file", f"./remap", - ] - fregrid_exe = os.path.join(self.task_config.DATA, 'fregrid.x') - exec_cmd = Executable(fregrid_exe) + # execute CALCFIMSEXE to calculate IMS snowdepth + exe = Executable(self.task_config.APRUN_CALCFIMS) + exe.add_default_arg(exe_dest) try: - logger.debug(f"Executing {exec_cmd}") - exec_cmd(*arg_list) + logger.debug(f"Executing {exe}") + exe() except OSError: - raise OSError(f"Failed to execute {exec_cmd}") - except Exception: - raise WorkflowException(f"An error occured during execution of 
{exec_cmd}") + logger.exception(f"Failed to execute {exe}") + raise + except Exception as err: + logger.exception(f"An error occured during execution of {exe}") + raise WorkflowException(f"An error occured during execution of {exe}") from err - @logit(logger) - def regridDetInc(self) -> None: - """Run fregrid to regrid the deterministic snow increment - to the ensemble resolution + # Ensure the snow depth IMS file is produced by the above executable + input_file = f"IMSscf.{to_YMD(localconf.current_cycle)}.{localconf.CASE}_oro_data.nc" + if not os.path.isfile(f"{os.path.join(localconf.DATA, input_file)}"): + logger.exception(f"{self.task_config.CALCFIMSEXE} failed to produce {input_file}") + raise FileNotFoundError(f"{os.path.join(localconf.DATA, input_file)}") - Parameters - ---------- - self : Analysis - Instance of the SnowEnsAnalysis object - """ + # Execute imspy to create the IMS obs data in IODA format + logger.info("Create IMS obs data in IODA format") - chdir(self.task_config.DATA) - - arg_list = [ - "--input_mosaic", f"./orog/det/{self.task_config.CASE}_mosaic.nc", - "--input_dir", f"./inc/det/", - "--input_file", f"snowinc.{to_fv3time(self.task_config.bkg_time)}.sfc_data", - "--scalar_field", f"snodl", - "--output_dir", f"./inc/det_ensres/", - "--output_file", f"snowinc.{to_fv3time(self.task_config.bkg_time)}.sfc_data", - "--output_mosaic", f"./orog/ens/{self.task_config.CASE_ENS}_mosaic.nc", - "--interp_method", f"conserve_order1", - "--weight_file", f"./orog/det/{self.task_config.CASE}.mx{self.task_config.OCNRES}_interp_weight", - "--weight_field", f"lsm_frac", - "--remap_file", f"./remap", - ] - fregrid_exe = os.path.join(self.task_config.DATA, 'fregrid.x') - exec_cmd = Executable(fregrid_exe) + output_file = f"ims_snow_{to_YMDH(localconf.current_cycle)}.nc4" + if os.path.isfile(f"{os.path.join(localconf.DATA, output_file)}"): + rm_p(output_file) + + exe = Executable(self.task_config.IMS2IODACONV) + exe.add_default_arg(["-i", f"{os.path.join(localconf.DATA, input_file)}"]) + exe.add_default_arg(["-o", f"{os.path.join(localconf.DATA, output_file)}"]) try: - logger.debug(f"Executing {exec_cmd}") - exec_cmd(*arg_list) + logger.debug(f"Executing {exe}") + exe() except OSError: - raise OSError(f"Failed to execute {exec_cmd}") - except Exception: - raise WorkflowException(f"An error occured during execution of {exec_cmd}") + logger.exception(f"Failed to execute {exe}") + raise + except Exception as err: + logger.exception(f"An error occured during execution of {exe}") + raise WorkflowException(f"An error occured during execution of {exe}") from err + + # Ensure the IODA snow depth IMS file is produced by the IODA converter + # If so, copy to DATA/obs/ + if not os.path.isfile(f"{os.path.join(localconf.DATA, output_file)}"): + logger.exception(f"{self.task_config.IMS2IODACONV} failed to produce {output_file}") + raise FileNotFoundError(f"{os.path.join(localconf.DATA, output_file)}") + else: + logger.info(f"Copy {output_file} to {os.path.join(localconf.DATA, 'obs')}") + FileHandler(prep_ims_config.ims2ioda).sync() @logit(logger) - def recenterEns(self) -> None: - """Run recentering code to create an ensemble of snow increments - based on the deterministic increment, and the difference - between the determinstic and ensemble mean forecast + def execute(self, jedi_dict_key: str) -> None: + """Run JEDI executable + + This method will run JEDI executables for the global snow analysis Parameters ---------- - self : Analysis - Instance of the SnowEnsAnalysis object + jedi_dict_key + key 
specifying particular Jedi object in self.jedi_dict + + Returns + ---------- + None """ - logger.info("Running recentering code") - exec_cmd = Executable(self.task_config.APRUN_ESNOWRECEN) - exec_name = os.path.join(self.task_config.DATA, 'gdasapp_land_ensrecenter.x') - exec_cmd.add_default_arg(exec_name) - exec_cmd.add_default_arg(self.task_config.jedi_yaml) - try: - logger.debug(f"Executing {exec_cmd}") - exec_cmd() - except OSError: - raise OSError(f"Failed to execute {exec_cmd}") - except Exception: - raise WorkflowException(f"An error occured during execution of {exec_cmd}") + self.jedi_dict[jedi_dict_key].execute() @logit(logger) def finalize(self) -> None: - """Performs closing actions of the snow ensemble analysis task + """Performs closing actions of the Snow analysis task This method: - - copies the ensemble snow analyses to the proper locations - - copies the ensemble mean increment to COM + - tar and gzip the output diag files and place in COM/ + - copy the generated YAML file from initialize to the COM/ + - copy the analysis files to the COM/ + - copy the increment files to the COM/ Parameters ---------- self : Analysis Instance of the SnowEnsAnalysis object """ - # save files to COM - logger.info(f"Copying files described in {self.task_config.SNOW_ENS_FINALIZE_TMPL}") - snow_final_list = parse_j2yaml(self.task_config.SNOW_ENS_FINALIZE_TMPL, self.task_config) - FileHandler(snow_final_list).sync() + + # ---- tar up diags + # path of output tar statfile + snowstat = os.path.join(self.task_config.COMOUT_SNOW_ANALYSIS, f"{self.task_config.APREFIX}snowstat.tgz") + + # get list of diag files to put in tarball + diags = glob.glob(os.path.join(self.task_config.DATA, 'diags', 'diag*nc')) + + logger.info(f"Compressing {len(diags)} diag files to {snowstat}") + + # gzip the files first + logger.debug(f"Gzipping {len(diags)} diag files") + for diagfile in diags: + with open(diagfile, 'rb') as f_in, gzip.open(f"{diagfile}.gz", 'wb') as f_out: + f_out.writelines(f_in) + + # open tar file for writing + logger.debug(f"Creating tar file {snowstat} with {len(diags)} gzipped diag files") + with tarfile.open(snowstat, "w|gz") as archive: + for diagfile in diags: + diaggzip = f"{diagfile}.gz" + archive.add(diaggzip, arcname=os.path.basename(diaggzip)) + + # get list of yamls to copy to ROTDIR + yamls = glob.glob(os.path.join(self.task_config.DATA, '*snow*yaml')) + + # copy full YAML from executable to ROTDIR + for src in yamls: + yaml_base = os.path.splitext(os.path.basename(src))[0] + dest_yaml_name = f"{self.task_config.RUN}.t{self.task_config.cyc:02d}z.{yaml_base}.yaml" + dest = os.path.join(self.task_config.COMOUT_CONF, dest_yaml_name) + logger.debug(f"Copying {src} to {dest}") + yaml_copy = { + 'copy': [[src, dest]] + } + FileHandler(yaml_copy).sync() + + logger.info("Copy analysis to COM") + bkgtimes = [] + if self.task_config.DOIAU: + # need both beginning and middle of window + bkgtimes.append(self.task_config.SNOW_WINDOW_BEGIN) + bkgtimes.append(self.task_config.current_cycle) + anllist = [] + for mem in range(1, self.task_config.NMEM_ENS + 1): + for bkgtime in bkgtimes: + template = f'{to_fv3time(bkgtime)}.sfc_data.tile{{tilenum}}.nc' + for itile in range(1, self.task_config.ntiles + 1): + filename = template.format(tilenum=itile) + src = os.path.join(self.task_config.DATA, 'anl', f"mem{mem:03d}", filename) + COMOUT_SNOW_ANALYSIS = self.task_config.COMOUT_SNOW_ANALYSIS.replace('ensstat', f"mem{mem:03d}") + dest = os.path.join(COMOUT_SNOW_ANALYSIS, filename) + anllist.append([src, dest]) + 
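                    # [Editor's annotation, not part of the diff] COMOUT_SNOW_ANALYSIS is
                    # templated on the "ensstat" member directory, so the str.replace()
                    # above re-points each destination at that member's own COM tree,
                    # e.g. .../ensstat/... -> .../mem001/... (illustrative paths).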
FileHandler({'copy': anllist}).sync() + + logger.info('Copy increments to COM') + template = f'snowinc.{to_fv3time(self.task_config.current_cycle)}.sfc_data.tile{{tilenum}}.nc' + inclist = [] + for itile in range(1, self.task_config.ntiles + 1): + filename = template.format(tilenum=itile) + src = os.path.join(self.task_config.DATA, 'anl', filename) + dest = os.path.join(self.task_config.COMOUT_SNOW_ANALYSIS, filename) + inclist.append([src, dest]) + FileHandler({'copy': inclist}).sync() @logit(logger) - def addEnsIncrements(self) -> None: - """Loop through all ensemble members and apply increment to create - a surface analysis for snow + def add_increments(self) -> None: + """Executes the program "apply_incr.exe" to create analysis "sfc_data" files by adding increments to backgrounds Parameters ---------- @@ -286,145 +374,69 @@ def addEnsIncrements(self) -> None: Instance of the SnowEnsAnalysis object """ - bkg_times = [] - # no matter what, we want to process the center of the window - bkg_times.append(self.task_config.current_cycle) - # if DOIAU, we need to copy the increment to be valid at the center of the window - # and compute the analysis there to restart the model if self.task_config.DOIAU: logger.info("Copying increments to beginning of window") - template_in = f'snowinc.{to_fv3time(self.task_config.SNOW_WINDOW_BEGIN)}.sfc_data.tile{{tilenum}}.nc' - template_out = f'snowinc.{to_fv3time(self.task_config.current_cycle)}.sfc_data.tile{{tilenum}}.nc' + template_in = f'snowinc.{to_fv3time(self.task_config.current_cycle)}.sfc_data.tile{{tilenum}}.nc' + template_out = f'snowinc.{to_fv3time(self.task_config.SNOW_WINDOW_BEGIN)}.sfc_data.tile{{tilenum}}.nc' inclist = [] - for itile in range(1, 7): + for itile in range(1, self.task_config.ntiles + 1): filename_in = template_in.format(tilenum=itile) filename_out = template_out.format(tilenum=itile) - src = os.path.join(self.task_config.DATA, 'inc', 'ensmean', filename_in) - dest = os.path.join(self.task_config.DATA, 'inc', 'ensmean', filename_out) + src = os.path.join(self.task_config.DATA, 'anl', filename_in) + dest = os.path.join(self.task_config.DATA, 'anl', filename_out) inclist.append([src, dest]) FileHandler({'copy': inclist}).sync() - # if running with IAU, we also need an analysis at the beginning of the window - bkg_times.append(self.task_config.SNOW_WINDOW_BEGIN) - - for bkg_time in bkg_times: - for mem in range(1, self.task_config.NMEM_ENS + 1): - # for now, just looping serially, should parallelize this eventually - logger.info(f"Now applying increment to member mem{mem:03}") - logger.info(f'{os.path.join(self.task_config.DATA, "anl", f"mem{mem:03}")}') - memdict = AttrDict( - { - 'HOMEgfs': self.task_config.HOMEgfs, - 'DATA': os.path.join(self.task_config.DATA, "anl", f"mem{mem:03}"), - 'DATAROOT': self.task_config.DATA, - 'current_cycle': bkg_time, - 'CASE_ENS': self.task_config.CASE_ENS, - 'OCNRES': self.task_config.OCNRES, - 'ntiles': self.task_config.ntiles, - 'ENS_APPLY_INCR_NML_TMPL': self.task_config.ENS_APPLY_INCR_NML_TMPL, - 'APPLY_INCR_EXE': self.task_config.APPLY_INCR_EXE, - 'APRUN_APPLY_INCR': self.task_config.APRUN_APPLY_INCR, - 'MYMEM': f"{mem:03}", - } - ) - self.add_increments(memdict) - - @staticmethod - @logit(logger) - def get_bkg_dict(config: Dict) -> Dict[str, List[str]]: - """Compile a dictionary of model background files to copy - - This method constructs a dictionary of FV3 RESTART files (coupler, sfc_data) - that are needed for global snow DA and returns said dictionary for use by the FileHandler class. 
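
[Editor's note] The finalize() method above double-compresses the diagnostics: each diag file is gzipped, then the gzipped files are streamed into a gzip-compressed tar. A self-contained sketch of that pattern (paths are illustrative):

```python
import glob
import gzip
import os
import tarfile

diags = glob.glob(os.path.join("diags", "diag*nc"))  # illustrative run directory

# gzip each diag file alongside the original
for diagfile in diags:
    with open(diagfile, "rb") as f_in, gzip.open(f"{diagfile}.gz", "wb") as f_out:
        f_out.writelines(f_in)

# stream the gzipped files into one tarball; "w|gz" writes a compressed stream
with tarfile.open("snowstat.tgz", "w|gz") as archive:
    for diagfile in diags:
        archive.add(f"{diagfile}.gz", arcname=os.path.basename(f"{diagfile}.gz"))
```
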
- - Parameters - ---------- - config: Dict - Dictionary of key-value pairs needed in this method - Should contain the following keys: - COMIN_ATMOS_RESTART_PREV - DATA - current_cycle - ntiles - Returns - ---------- - bkg_dict: Dict - a dictionary containing the list of model background files to copy for FileHandler - """ - - bkg_dict = { - 'mkdir': [], - 'copy': [], - } - return bkg_dict - - @staticmethod - @logit(logger) - def add_increments(config: Dict) -> None: - """Executes the program "apply_incr.exe" to create analysis "sfc_data" files by adding increments to backgrounds - - Parameters - ---------- - config: Dict - Dictionary of key-value pairs needed in this method - Should contain the following keys: - HOMEgfs - DATA - DATAROOT - current_cycle - CASE - OCNRES - ntiles - APPLY_INCR_NML_TMPL - APPLY_INCR_EXE - APRUN_APPLY_INCR - - Raises - ------ - OSError - Failure due to OS issues - WorkflowException - All other exceptions - """ - os.chdir(config.DATA) - - logger.info("Create namelist for APPLY_INCR_EXE") - nml_template = config.ENS_APPLY_INCR_NML_TMPL - nml_data = Jinja(nml_template, config).render - logger.debug(f"apply_incr_nml:\n{nml_data}") - - nml_file = os.path.join(config.DATA, "apply_incr_nml") - with open(nml_file, "w") as fho: - fho.write(nml_data) - - logger.info("Link APPLY_INCR_EXE into DATA/") - exe_src = config.APPLY_INCR_EXE - exe_dest = os.path.join(config.DATA, os.path.basename(exe_src)) - if os.path.exists(exe_dest): - rm_p(exe_dest) - os.symlink(exe_src, exe_dest) - - # execute APPLY_INCR_EXE to create analysis files - exe = Executable(config.APRUN_APPLY_INCR) - exe.add_default_arg(os.path.join(config.DATA, os.path.basename(exe_src))) - logger.info(f"Executing {exe}") - try: - exe() - except OSError: - raise OSError(f"Failed to execute {exe}") - except Exception: - raise WorkflowException(f"An error occured during execution of {exe}") - - def get_obs_dict(self) -> Dict[str, Any]: - obs_dict = { - 'mkdir': [], - 'copy': [], - } - return obs_dict - - def get_bias_dict(self) -> Dict[str, Any]: - bias_dict = { - 'mkdir': [], - 'copy': [], - } - return bias_dict + bkgtimes = [] + if self.task_config.DOIAU: + # need both beginning and middle of window + bkgtimes.append(self.task_config.SNOW_WINDOW_BEGIN) + bkgtimes.append(self.task_config.current_cycle) + + # loop over members + # TODO, make this better, or rewrite code to run in parallel + for mem in range(1, self.task_config.NMEM_ENS + 1): + logger.info(f"Processing member mem{mem:03d}") + # loop over times to apply increments + for bkgtime in bkgtimes: + logger.info(f"Processing analysis valid: {bkgtime}") + logger.info("Create namelist for APPLY_INCR_EXE") + nml_template = self.task_config.ENS_APPLY_INCR_NML_TMPL + nml_config = { + 'current_cycle': bkgtime, + 'CASE': self.task_config.CASE, + 'DATA': self.task_config.DATA, + 'HOMEgfs': self.task_config.HOMEgfs, + 'OCNRES': self.task_config.OCNRES, + 'MYMEM': f"{mem:03d}", + 'CASE_ENS': self.task_config.CASE_ENS, + } + nml_data = Jinja(nml_template, nml_config).render + logger.debug(f"apply_incr_nml:\n{nml_data}") + + nml_file = os.path.join(self.task_config.DATA, "apply_incr_nml") + if os.path.exists(nml_file): + rm_p(nml_file) + with open(nml_file, "w") as fho: + fho.write(nml_data) + + logger.info("Link APPLY_INCR_EXE into DATA/") + exe_src = self.task_config.APPLY_INCR_EXE + exe_dest = os.path.join(self.task_config.DATA, os.path.basename(exe_src)) + if os.path.exists(exe_dest): + rm_p(exe_dest) + os.symlink(exe_src, exe_dest) + + # execute APPLY_INCR_EXE to 
create analysis files + exe = Executable(self.task_config.APRUN_APPLY_INCR) + exe.add_default_arg(exe_dest) + logger.info(f"Executing {exe}") + try: + logger.debug(f"Executing {exe}") + exe() + except OSError: + logger.exception(f"Failed to execute {exe}") + raise + except Exception as err: + logger.exception(f"An error occured during execution of {exe}") + raise WorkflowException(f"An error occured during execution of {exe}") from err diff --git a/versions/fix.ver b/versions/fix.ver index 4739ce778a..991e0ce13a 100644 --- a/versions/fix.ver +++ b/versions/fix.ver @@ -13,6 +13,7 @@ export gdas_soca_ver=20240802 export gdas_gsibec_ver=20240416 export gdas_obs_ver=20240213 export gdas_aero_ver=20240806 +export gdas_snow_ver=20241210 export glwu_ver=20220805 export gsi_ver=20240208 export lut_ver=20220805 diff --git a/workflow/applications/gfs_cycled.py b/workflow/applications/gfs_cycled.py index e11f708aa6..543d7a9d8c 100644 --- a/workflow/applications/gfs_cycled.py +++ b/workflow/applications/gfs_cycled.py @@ -138,7 +138,7 @@ def _get_app_configs(self, run): if options['do_jedisnowda']: configs += ['snowanl'] if options['do_hybvar']: - configs += ['esnowrecen'] + configs += ['esnowanl'] if options['do_mos']: configs += ['mos_stn_prep', 'mos_grd_prep', 'mos_ext_stn_prep', 'mos_ext_grd_prep', @@ -316,7 +316,7 @@ def get_task_names(self): task_names[run] += ['eobs', 'eupd'] task_names[run].append('echgres') if 'gdas' in run else 0 task_names[run] += ['ediag'] if options['lobsdiag_forenkf'] else ['eomg'] - task_names[run].append('esnowrecen') if options['do_jedisnowda'] and 'gdas' in run else 0 + task_names[run].append('esnowanl') if options['do_jedisnowda'] and 'gdas' in run else 0 task_names[run] += ['stage_ic', 'ecen', 'esfc', 'efcs', 'epos', 'earc', 'cleanup'] diff --git a/workflow/rocoto/gfs_tasks.py b/workflow/rocoto/gfs_tasks.py index 59b0951d44..54870b79cc 100644 --- a/workflow/rocoto/gfs_tasks.py +++ b/workflow/rocoto/gfs_tasks.py @@ -586,23 +586,23 @@ def snowanl(self): task = rocoto.create_task(task_dict) return task - def esnowrecen(self): + def esnowanl(self): deps = [] - dep_dict = {'type': 'task', 'name': f'{self.run.replace("enkf","")}_snowanl'} - deps.append(rocoto.add_dependency(dep_dict)) dep_dict = {'type': 'metatask', 'name': f'{self.run}_epmn', 'offset': f"-{timedelta_to_HMS(self._base['interval_gdas'])}"} deps.append(rocoto.add_dependency(dep_dict)) + dep_dict = {'type': 'task', 'name': f"{self.run.replace('enkf', '')}_prep"} + deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) - resources = self.get_resource('esnowrecen') - task_name = f'{self.run}_esnowrecen' + resources = self.get_resource('esnowanl') + task_name = f'{self.run}_esnowanl' task_dict = {'task_name': task_name, 'resources': resources, 'dependency': dependencies, 'envars': self.envars, 'cycledef': self.run.replace('enkf', ''), - 'command': f'{self.HOMEgfs}/jobs/rocoto/esnowrecen.sh', + 'command': f'{self.HOMEgfs}/jobs/rocoto/esnowanl.sh', 'job_name': f'{self.pslot}_{task_name}_@H', 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', 'maxtries': '&MAXTRIES;' @@ -2731,7 +2731,7 @@ def esfc(self): dep_dict = {'type': 'task', 'name': f'{self.run}_eupd'} deps.append(rocoto.add_dependency(dep_dict)) if self.options['do_jedisnowda']: - dep_dict = {'type': 'task', 'name': f'{self.run}_esnowrecen'} + dep_dict = {'type': 'task', 'name': f'{self.run}_esnowanl'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = 
rocoto.create_dependency(dep_condition='and', dep=deps)

diff --git a/workflow/rocoto/tasks.py b/workflow/rocoto/tasks.py
index c0496a4996..d9c769ffbe 100644
--- a/workflow/rocoto/tasks.py
+++ b/workflow/rocoto/tasks.py
@@ -20,7 +20,7 @@ class Tasks:
                    'eobs', 'eomg', 'epos', 'esfc', 'eupd',
                    'atmensanlinit', 'atmensanlobs', 'atmensanlsol', 'atmensanlletkf', 'atmensanlfv3inc', 'atmensanlfinal',
                    'aeroanlinit', 'aeroanlvar', 'aeroanlfinal', 'aeroanlgenb',
-                   'snowanl', 'esnowrecen',
+                   'snowanl', 'esnowanl',
                    'fcst',
                    'atmanlupp', 'atmanlprod', 'atmupp', 'goesupp',
                    'atmos_prod', 'ocean_prod', 'ice_prod',

From e6849447fb231979ec2112594d51be9e93b99a5e Mon Sep 17 00:00:00 2001
From: Rhae Sung Kim
Date: Fri, 20 Dec 2024 02:19:37 -0500
Subject: [PATCH 03/33] Change orog gravity wave drag scheme for grid sizes less than 10km (#3175)

It has been found that while the HR4 GFS update improves the forecast skill compared to the HR3 update, its skill is still less than that of the HR2 update. To further improve the HR4 forecast skill, the tunable OGWD parameter 'effective grid size' has been tested. The magnitude of the OGWD is inversely proportional to the effective grid size (dx). The current default effective grid size is 6*dx, which was optimally tuned for the C768 resolution (13 km). It appears that an effective grid size of 6*dx produces OGWD that is too weak at the C1152 resolution (9 km). Therefore, for higher resolutions whose grid sizes are less than 10 km (i.e., C1152 and C3072), this update reduces the effective grid size to 2*dx, which increases the OGWD and significantly improves the GFS forecast skill at C1152, making it comparable to the HR2 forecast skill.

Resolves: #3181
---
 parm/config/gefs/config.ufs |  7 +++++++
 parm/config/gfs/config.ufs  | 11 +++++++++++
 2 files changed, 18 insertions(+)

diff --git a/parm/config/gefs/config.ufs b/parm/config/gefs/config.ufs
index e5859bd801..c46023aff6 100644
--- a/parm/config/gefs/config.ufs
+++ b/parm/config/gefs/config.ufs
@@ -83,6 +83,7 @@ case "${fv3_res}" in
         export xr_cnvcld=".true." # Pass conv. clouds to Xu-Randall cloud fraction
         export cdmbgwd="0.071,2.1,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling
         export cdmbgwd_gsl="40.0,1.77,1.0,1.0" # settings for GSL drag suite
+        export psl_gwd_dx_factor=6.0
         export k_split=1
         export n_split=4
         export tau=10.0
@@ -107,6 +108,7 @@ case "${fv3_res}" in
         export xr_cnvcld=".true." # Pass conv. clouds to Xu-Randall cloud fraction
         export cdmbgwd="0.14,1.8,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling
         export cdmbgwd_gsl="20.0,2.5,1.0,1.0" # settings for GSL drag suite
+        export psl_gwd_dx_factor=6.0
         export knob_ugwp_tauamp=3.0e-3 # setting for UGWPv1 non-stationary GWD
         export k_split=1
         export n_split=4
@@ -130,6 +132,7 @@ case "${fv3_res}" in
         export nthreads_ufs_gfs=2
         export cdmbgwd="0.23,1.5,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling
         export cdmbgwd_gsl="10.0,3.5,1.0,1.0" # settings for GSL drag suite
+        export psl_gwd_dx_factor=6.0
         export knob_ugwp_tauamp=1.5e-3 # setting for UGWPv1 non-stationary GWD
         export xr_cnvcld=".true." # Pass conv.
clouds to Xu-Randall cloud fraction export k_split=2 @@ -154,6 +157,7 @@ case "${fv3_res}" in export nthreads_ufs_gfs=2 export cdmbgwd="1.1,0.72,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling export cdmbgwd_gsl="5.0,5.0,1.0,1.0" # settings for GSL drag suite + export psl_gwd_dx_factor=6.0 export knob_ugwp_tauamp=0.8e-3 # setting for UGWPv1 non-stationary GWD export k_split=2 export n_split=4 @@ -177,6 +181,7 @@ case "${fv3_res}" in export nthreads_ufs_gfs=4 export cdmbgwd="4.0,0.15,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling export cdmbgwd_gsl="2.5,7.5,1.0,1.0" # settings for GSL drag suite + export psl_gwd_dx_factor=6.0 export knob_ugwp_tauamp=0.5e-3 # setting for UGWPv1 non-stationary GWD export k_split=2 export n_split=4 @@ -200,6 +205,7 @@ case "${fv3_res}" in export nthreads_ufs_gfs=4 export cdmbgwd="4.0,0.10,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling export cdmbgwd_gsl="1.67,8.8,1.0,1.0" # settings for GSL drag suite + export psl_gwd_dx_factor=2.0 export knob_ugwp_tauamp=0.35e-3 # setting for UGWPv1 non-stationary GWD export k_split=2 export n_split=6 @@ -223,6 +229,7 @@ case "${fv3_res}" in export nthreads_ufs_gfs=4 export cdmbgwd="4.0,0.05,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling export cdmbgwd_gsl="0.625,14.1,1.0,1.0" # settings for GSL drag suite + export psl_gwd_dx_factor=2.0 export knob_ugwp_tauamp=0.13e-3 # setting for UGWPv1 non-stationary GWD export k_split=4 export n_split=5 diff --git a/parm/config/gfs/config.ufs b/parm/config/gfs/config.ufs index 3f8e7022fa..9737404dd1 100644 --- a/parm/config/gfs/config.ufs +++ b/parm/config/gfs/config.ufs @@ -99,6 +99,7 @@ case "${fv3_res}" in export xr_cnvcld=".false." # Do not pass conv. clouds to Xu-Randall cloud fraction export cdmbgwd="0.071,2.1,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling export cdmbgwd_gsl="40.0,1.77,1.0,1.0" # settings for GSL drag suite + export psl_gwd_dx_factor=6.0 export knob_ugwp_tauamp=6.0e-3 # setting for UGWPv1 non-stationary GWD export k_split=1 export n_split=4 @@ -124,6 +125,7 @@ case "${fv3_res}" in export npy_nest=241 export NEST_DLON=0.25 export NEST_DLAT=0.25 + export psl_gwd_dx_factor=6.0 export WRITE_GROUP_GDAS=2 export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=2 export WRITE_GROUP_GFS=2 @@ -141,6 +143,7 @@ case "${fv3_res}" in export xr_cnvcld=.false. # Do not pass conv. 
clouds to Xu-Randall cloud fraction export cdmbgwd="0.14,1.8,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling export cdmbgwd_gsl="20.0,2.5,1.0,1.0" # settings for GSL drag suite + export psl_gwd_dx_factor=6.0 export knob_ugwp_tauamp=3.0e-3 # setting for UGWPv1 non-stationary GWD export k_split=1 export n_split=4 @@ -167,6 +170,7 @@ case "${fv3_res}" in export npy_nest=481 export NEST_DLON=0.125 export NEST_DLAT=0.125 + export psl_gwd_dx_factor=6.0 export WRITE_GROUP_GDAS=2 export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=15 export WRITE_GROUP_GFS=2 @@ -183,6 +187,7 @@ case "${fv3_res}" in export nthreads_ufs_gfs=2 export cdmbgwd="0.23,1.5,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling export cdmbgwd_gsl="10.0,3.5,1.0,1.0" # settings for GSL drag suite + export psl_gwd_dx_factor=6.0 export knob_ugwp_tauamp=1.5e-3 # setting for UGWPv1 non-stationary GWD export k_split=2 export n_split=4 @@ -211,6 +216,7 @@ case "${fv3_res}" in export npy_nest=961 export NEST_DLON=0.0625 export NEST_DLAT=0.0625 + export psl_gwd_dx_factor=2.0 export WRITE_GROUP_GDAS=2 export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=20 export WRITE_GROUP_GFS=2 @@ -227,6 +233,7 @@ case "${fv3_res}" in export nthreads_ufs_gfs=2 export cdmbgwd="1.1,0.72,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling export cdmbgwd_gsl="5.0,5.0,1.0,1.0" # settings for GSL drag suite + export psl_gwd_dx_factor=6.0 export knob_ugwp_tauamp=0.8e-3 # setting for UGWPv1 non-stationary GWD export k_split=2 export n_split=4 @@ -258,6 +265,7 @@ case "${fv3_res}" in export npy_nest=1921 export NEST_DLON=0.0325 export NEST_DLAT=0.0325 + export psl_gwd_dx_factor=2.0 export WRITE_GROUP_GDAS=2 export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=90 export WRITE_GROUP_GFS=2 @@ -274,6 +282,7 @@ case "${fv3_res}" in export nthreads_ufs_gfs=4 export cdmbgwd="4.0,0.15,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling export cdmbgwd_gsl="2.5,7.5,1.0,1.0" # settings for GSL drag suite + export psl_gwd_dx_factor=6.0 export knob_ugwp_tauamp=0.5e-3 # setting for UGWPv1 non-stationary GWD export k_split=2 export n_split=4 @@ -298,6 +307,7 @@ case "${fv3_res}" in export nthreads_ufs_gfs=4 export cdmbgwd="4.0,0.10,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling export cdmbgwd_gsl="1.67,8.8,1.0,1.0" # settings for GSL drag suite + export psl_gwd_dx_factor=2.0 export knob_ugwp_tauamp=0.35e-3 # setting for UGWPv1 non-stationary GWD export k_split=2 export n_split=6 @@ -321,6 +331,7 @@ case "${fv3_res}" in export nthreads_ufs_gfs=4 export cdmbgwd="4.0,0.05,1.0,1.0" # mountain blocking, ogwd, cgwd, cgwd src scaling export cdmbgwd_gsl="0.625,14.1,1.0,1.0" # settings for GSL drag suite + export psl_gwd_dx_factor=2.0 export knob_ugwp_tauamp=0.13e-3 # setting for UGWPv1 non-stationary GWD export k_split=4 export n_split=5 From 290f1d2d0b8a461e6ada787c673c713573ab2be1 Mon Sep 17 00:00:00 2001 From: Wei Huang Date: Tue, 24 Dec 2024 01:05:34 -0700 Subject: [PATCH 04/33] Support global-workflow using Rocky 8 on CSPs (#2998) # Description With ParallelWorks now default Rocky 8 on CSPs, and move to Rocky 8 only after 1/1/2025, we need to modify global-workflow module files to use Rocky 8 supported spack-stack, and test compile and run to make sure all works under Rocky 8. i) Rocky 8 update new features: a. Wave worked in C48_S2SWA_gefs case, so turn SUPPORT_WAVES to "YES" in awspw.yaml. Actually, if we did not set SUPPORT_WAVES to "YES", setup_expt.py will rise exception. b. 
Using two type of nodes (chips/queues) on AWS, compute/process, where forecasts run in "compute" queue, which is a big node (more cores), others run in "process" queue, which has small node (less cores). ii) Rocky 8 update needs the following submodules PRs below - NOAA-EMC/gfs_utils#81 - NOAA-EMC/ufs_utils#989 - NOAA-EMC/upp#1034 - ufs-community/ufs-weather-model#2461 Resolves #2997 --------- Co-authored-by: David Huber <69919478+DavidHuber-NOAA@users.noreply.github.com> --- env/AWSPW.env | 68 ++++++++++-------------- env/AZUREPW.env | 17 +++--- env/GOOGLEPW.env | 4 +- modulefiles/module_base.noaacloud.lua | 3 ++ modulefiles/module_gwci.noaacloud.lua | 6 +-- modulefiles/module_gwsetup.noaacloud.lua | 13 ++--- parm/config/gefs/config.resources | 6 +-- parm/config/gefs/config.resources.AWSPW | 58 ++++++++++++++++++++ parm/config/gfs/config.resources | 8 +-- parm/config/gfs/config.resources.AWSPW | 24 +++++++++ sorc/build_ufs.sh | 2 +- sorc/gfs_utils.fd | 2 +- sorc/ufs_utils.fd | 2 +- versions/build.noaacloud.ver | 6 +-- versions/run.noaacloud.ver | 6 +-- workflow/hosts/awspw.yaml | 4 +- workflow/hosts/azurepw.yaml | 6 ++- workflow/hosts/googlepw.yaml | 6 ++- workflow/setup_expt.py | 4 -- 19 files changed, 161 insertions(+), 84 deletions(-) diff --git a/env/AWSPW.env b/env/AWSPW.env index e366128a1d..f365695f85 100755 --- a/env/AWSPW.env +++ b/env/AWSPW.env @@ -33,7 +33,29 @@ else exit 2 fi -if [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then +if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then + + export POE="NO" + export BACK="NO" + export sys_tp="AWSPW" + export launcher_PREP="srun" + +elif [[ "${step}" = "prepsnowobs" ]]; then + + export APRUN_CALCFIMS="${APRUN_default}" + +elif [[ "${step}" = "prep_emissions" ]]; then + + export APRUN="${APRUN_default}" + +elif [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step}" = "wavepostsbs" ]] || [[ "${step}" = "wavepostbndpnt" ]] || [[ "${step}" = "wavepostbndpntbll" ]] || [[ "${step}" = "wavepostpnt" ]]; then + + export CFP_MP="YES" + if [[ "${step}" = "waveprep" ]]; then export MP_PULSE=0 ; fi + export wavempexec=${launcher} + export wave_mpmd=${mpmd_opt} + +elif [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then export launcher="srun --mpi=pmi2 -l" @@ -52,52 +74,16 @@ elif [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step} elif [[ "${step}" = "post" ]]; then - export NTHREADS_NP=${NTHREADS1} - export APRUN_NP="${APRUN_default}" - - export NTHREADS_DWN=${threads_per_task_dwn:-1} - [[ ${NTHREADS_DWN} -gt ${max_threads_per_task} ]] && export NTHREADS_DWN=${max_threads_per_task} - export APRUN_DWN="${launcher} -n ${ntasks_dwn}" - -elif [[ "${step}" = "atmos_products" ]]; then - - export USE_CFP="YES" # Use MPMD for downstream product generation on Hera + export NTHREADS_UPP=${NTHREADS1} + export APRUN_UPP="${APRUN_default} --cpus-per-task=${NTHREADS_UPP}" elif [[ "${step}" = "oceanice_products" ]]; then export NTHREADS_OCNICEPOST=${NTHREADS1} export APRUN_OCNICEPOST="${launcher} -n 1 --cpus-per-task=${NTHREADS_OCNICEPOST}" -elif [[ "${step}" = "ecen" ]]; then - - export NTHREADS_ECEN=${NTHREADSmax} - export APRUN_ECEN="${APRUN_default}" - - export NTHREADS_CHGRES=${threads_per_task_chgres:-12} - [[ ${NTHREADS_CHGRES} -gt ${max_tasks_per_node} ]] && export NTHREADS_CHGRES=${max_tasks_per_node} - export APRUN_CHGRES="time" - - export NTHREADS_CALCINC=${threads_per_task_calcinc:-1} - [[ ${NTHREADS_CALCINC} -gt ${max_threads_per_task} ]] && export 
NTHREADS_CALCINC=${max_threads_per_task} - export APRUN_CALCINC="${APRUN_default}" - -elif [[ "${step}" = "esfc" ]]; then - - export NTHREADS_ESFC=${NTHREADSmax} - export APRUN_ESFC="${APRUN_default}" - - export NTHREADS_CYCLE=${threads_per_task_cycle:-14} - [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node} - export APRUN_CYCLE="${APRUN_default}" - -elif [[ "${step}" = "epos" ]]; then - - export NTHREADS_EPOS=${NTHREADSmax} - export APRUN_EPOS="${APRUN_default}" - -elif [[ "${step}" = "fit2obs" ]]; then +elif [[ "${step}" = "atmos_products" ]]; then - export NTHREADS_FIT2OBS=${NTHREADS1} - export MPIRUN="${APRUN_default}" + export USE_CFP="YES" # Use MPMD for downstream product generation on AWS fi diff --git a/env/AZUREPW.env b/env/AZUREPW.env index c2faeb2bf6..b2b4063ff3 100755 --- a/env/AZUREPW.env +++ b/env/AZUREPW.env @@ -15,6 +15,7 @@ export mpmd_opt="--multi-prog --output=mpmd.%j.%t.out" # Configure MPI environment export OMP_STACKSIZE=2048000 export NTHSTACK=1024000000 +export UCX_TLS=ud,sm,self ulimit -s unlimited ulimit -a @@ -50,6 +51,10 @@ elif [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step} export wavempexec=${launcher} export wave_mpmd=${mpmd_opt} +elif [[ "${step}" = "prep_emissions" ]]; then + + export APRUN="${APRUN_default}" + elif [[ "${step}" = "post" ]]; then export NTHREADS_NP=${NTHREADS1} @@ -71,7 +76,7 @@ elif [[ "${step}" = "oceanice_products" ]]; then elif [[ "${step}" = "ecen" ]]; then export NTHREADS_ECEN=${NTHREADSmax} - export APRUN_ECEN="${APRUN}" + export APRUN_ECEN="${APRUN_default}" export NTHREADS_CHGRES=${threads_per_task_chgres:-12} [[ ${NTHREADS_CHGRES} -gt ${max_tasks_per_node} ]] && export NTHREADS_CHGRES=${max_tasks_per_node} @@ -79,25 +84,25 @@ elif [[ "${step}" = "ecen" ]]; then export NTHREADS_CALCINC=${threads_per_task_calcinc:-1} [[ ${NTHREADS_CALCINC} -gt ${max_threads_per_task} ]] && export NTHREADS_CALCINC=${max_threads_per_task} - export APRUN_CALCINC="${APRUN}" + export APRUN_CALCINC="${APRUN_default}" elif [[ "${step}" = "esfc" ]]; then export NTHREADS_ESFC=${NTHREADSmax} - export APRUN_ESFC="${APRUN}" + export APRUN_ESFC="${APRUN_default}" export NTHREADS_CYCLE=${threads_per_task_cycle:-14} [[ ${NTHREADS_CYCLE} -gt ${max_tasks_per_node} ]] && export NTHREADS_CYCLE=${max_tasks_per_node} - export APRUN_CYCLE="${APRUN}" + export APRUN_CYCLE="${APRUN_default}" elif [[ "${step}" = "epos" ]]; then export NTHREADS_EPOS=${NTHREADSmax} - export APRUN_EPOS="${APRUN}" + export APRUN_EPOS="${APRUN_default}" elif [[ "${step}" = "fit2obs" ]]; then export NTHREADS_FIT2OBS=${NTHREADS1} - export MPIRUN="${APRUN}" + export MPIRUN="${APRUN_default}" fi diff --git a/env/GOOGLEPW.env b/env/GOOGLEPW.env index c3b5ec806a..d84008d648 100755 --- a/env/GOOGLEPW.env +++ b/env/GOOGLEPW.env @@ -45,7 +45,7 @@ if [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then elif [[ "${step}" = "prep_emissions" ]]; then - export APRUN + export APRUN="${APRUN_default}" elif [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step}" = "wavepostsbs" ]] || [[ "${step}" = "wavepostbndpnt" ]] || [[ "${step}" = "wavepostbndpntbll" ]] || [[ "${step}" = "wavepostpnt" ]]; then @@ -102,6 +102,6 @@ elif [[ "${step}" = "epos" ]]; then elif [[ "${step}" = "fit2obs" ]]; then export NTHREADS_FIT2OBS=${NTHREADS1} - export MPIRUN="${APRUN}" + export MPIRUN="${APRUN_default}" fi diff --git a/modulefiles/module_base.noaacloud.lua b/modulefiles/module_base.noaacloud.lua index 7997b618e4..3a7cc75d7a 
100644 --- a/modulefiles/module_base.noaacloud.lua +++ b/modulefiles/module_base.noaacloud.lua @@ -5,8 +5,11 @@ Load environment to run GFS on noaacloud local spack_mod_path=(os.getenv("spack_mod_path") or "None") prepend_path("MODULEPATH", spack_mod_path) +load("gnu") load(pathJoin("stack-intel", (os.getenv("stack_intel_ver") or "None"))) load(pathJoin("stack-intel-oneapi-mpi", (os.getenv("stack_impi_ver") or "None"))) +unload("gnu") + load(pathJoin("python", (os.getenv("python_ver") or "None"))) load(pathJoin("jasper", (os.getenv("jasper_ver") or "None"))) diff --git a/modulefiles/module_gwci.noaacloud.lua b/modulefiles/module_gwci.noaacloud.lua index c3142cd60d..2ac284ef85 100644 --- a/modulefiles/module_gwci.noaacloud.lua +++ b/modulefiles/module_gwci.noaacloud.lua @@ -2,10 +2,10 @@ help([[ Load environment to run GFS workflow setup scripts on noaacloud ]]) -prepend_path("MODULEPATH", "/contrib/spack-stack/spack-stack-1.6.0/envs/unified-env/install/modulefiles/Core") +prepend_path("MODULEPATH", "/contrib/spack-stack-rocky8/spack-stack-1.6.0/envs/ue-env/install/modulefiles/Core") -load(pathJoin("stack-intel", os.getenv("2021.3.0"))) -load(pathJoin("stack-intel-oneapi-mpi", os.getenv("2021.3.0"))) +load(pathJoin("stack-intel", os.getenv("2021.10.0"))) +load(pathJoin("stack-intel-oneapi-mpi", os.getenv("2021.10.0"))) load(pathJoin("netcdf-c", os.getenv("4.9.2"))) load(pathJoin("netcdf-fortran", os.getenv("4.6.1"))) diff --git a/modulefiles/module_gwsetup.noaacloud.lua b/modulefiles/module_gwsetup.noaacloud.lua index f3845e8d72..e2aa4050a3 100644 --- a/modulefiles/module_gwsetup.noaacloud.lua +++ b/modulefiles/module_gwsetup.noaacloud.lua @@ -4,17 +4,18 @@ Load environment to run GFS workflow setup scripts on noaacloud load(pathJoin("rocoto")) -prepend_path("MODULEPATH", "/contrib/spack-stack/spack-stack-1.6.0/envs/unified-env/install/modulefiles/Core") +prepend_path("MODULEPATH", "/contrib/spack-stack-rocky8/spack-stack-1.6.0/envs/ue-intel/install/modulefiles/Core") -local stack_intel_ver=os.getenv("stack_intel_ver") or "2021.3.0" -local python_ver=os.getenv("python_ver") or "3.10.3" +load("gnu") +local stack_intel_ver=os.getenv("stack_intel_ver") or "2021.10.0" +local stack_mpi_ver=os.getenv("stack_mpi_ver") or "2021.10.0" load(pathJoin("stack-intel", stack_intel_ver)) -load(pathJoin("python", python_ver)) +load(pathJoin("stack-intel-oneapi-mpi", stack_mpi_ver)) +unload("gnu") + load("py-jinja2") load("py-pyyaml") load("py-numpy") -local git_ver=os.getenv("git_ver") or "1.8.3.1" -load(pathJoin("git", git_ver)) whatis("Description: GFS run setup environment") diff --git a/parm/config/gefs/config.resources b/parm/config/gefs/config.resources index e1b9a036de..68f81c1039 100644 --- a/parm/config/gefs/config.resources +++ b/parm/config/gefs/config.resources @@ -41,15 +41,15 @@ case ${machine} in ;; "AWSPW") export PARTITION_BATCH="compute" - max_tasks_per_node=36 + max_tasks_per_node=48 ;; "AZUREPW") export PARTITION_BATCH="compute" - max_tasks_per_node=24 + max_tasks_per_node=36 ;; "GOOGLEPW") export PARTITION_BATCH="compute" - max_tasks_per_node=32 + max_tasks_per_node=30 ;; *) echo "FATAL ERROR: Unknown machine encountered by ${BASH_SOURCE[0]}" diff --git a/parm/config/gefs/config.resources.AWSPW b/parm/config/gefs/config.resources.AWSPW index a735c7622d..f91460b6aa 100644 --- a/parm/config/gefs/config.resources.AWSPW +++ b/parm/config/gefs/config.resources.AWSPW @@ -9,3 +9,61 @@ unset memory for mem_var in $(env | grep '^memory_' | cut -d= -f1); do unset "${mem_var}" done + +step=$1 + 
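# [Editor's annotation, not part of the diff] The case block below implements the
# two-queue layout described in this commit: parallel-heavy steps (fcst/efcs,
# wavepostpnt) land on the 48-core "compute" partition, while serial and service
# steps default to the smaller 24-core "process" partition.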
+case ${step} in + "fcst" | "efcs") + export PARTITION_BATCH="compute" + max_tasks_per_node=48 + ;; + + "arch") + export PARTITION_BATCH="process" + max_tasks_per_node=24 + ;; + + "prep_emissions") + export PARTITION_BATCH="process" + max_tasks_per_node=24 + export ntasks=1 + export threads_per_task=1 + export tasks_per_node=$(( max_tasks_per_node / threads_per_task )) + ;; + + "waveinit") + export PARTITION_BATCH="process" + max_tasks_per_node=24 + export ntasks=12 + export threads_per_task=1 + export tasks_per_node=$(( max_tasks_per_node / threads_per_task )) + export NTASKS=${ntasks} + ;; + + "wavepostpnt") + export PARTITION_BATCH="compute" + max_tasks_per_node=48 + export ntasks=240 + export threads_per_task=1 + export tasks_per_node=$(( max_tasks_per_node / threads_per_task )) + export NTASKS=${ntasks} + ;; + + "wavepostsbs" | "wavepostbndpnt" | "wavepostbndpntbll") + export PARTITION_BATCH="process" + max_tasks_per_node=24 + export ntasks=24 + export threads_per_task=1 + export tasks_per_node=$(( max_tasks_per_node / threads_per_task )) + export NTASKS=${ntasks} + ;; + + *) + export PARTITION_BATCH="process" + max_tasks_per_node=24 + ;; + +esac + +export max_tasks_per_node + diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources index 230872b8f3..eeb33716c0 100644 --- a/parm/config/gfs/config.resources +++ b/parm/config/gfs/config.resources @@ -107,16 +107,16 @@ case ${machine} in ;; "AWSPW") export PARTITION_BATCH="compute" - npe_node_max=36 - max_tasks_per_node=36 + npe_node_max=48 + max_tasks_per_node=48 # TODO Supply a max mem/node value for AWS # shellcheck disable=SC2034 mem_node_max="" ;; "AZUREPW") export PARTITION_BATCH="compute" - npe_node_max=24 - max_tasks_per_node=24 + npe_node_max=36 + max_tasks_per_node=36 # TODO Supply a max mem/node value for AZURE # shellcheck disable=SC2034 mem_node_max="" diff --git a/parm/config/gfs/config.resources.AWSPW b/parm/config/gfs/config.resources.AWSPW index a735c7622d..22fe110670 100644 --- a/parm/config/gfs/config.resources.AWSPW +++ b/parm/config/gfs/config.resources.AWSPW @@ -9,3 +9,27 @@ unset memory for mem_var in $(env | grep '^memory_' | cut -d= -f1); do unset "${mem_var}" done + +step=$1 + +case ${step} in + "fcst" | "efcs") + export PARTITION_BATCH="compute" + max_tasks_per_node=48 + ;; + + "arch") + export PARTITION_BATCH="process" + max_tasks_per_node=24 + ;; + + + *) + export PARTITION_BATCH="process" + max_tasks_per_node=24 + ;; + +esac + +export max_tasks_per_node + diff --git a/sorc/build_ufs.sh b/sorc/build_ufs.sh index 773c104be3..3b0b3ed638 100755 --- a/sorc/build_ufs.sh +++ b/sorc/build_ufs.sh @@ -12,7 +12,7 @@ EXEC_NAME="gfs_model.x" while getopts ":da:fj:e:vwy" option; do case "${option}" in - d) BUILD_TYPE="Debug";; + d) BUILD_TYPE="DEBUG";; a) APP="${OPTARG}";; f) FASTER="ON";; j) BUILD_JOBS="${OPTARG}";; diff --git a/sorc/gfs_utils.fd b/sorc/gfs_utils.fd index 856a42076a..4848ecbb5e 160000 --- a/sorc/gfs_utils.fd +++ b/sorc/gfs_utils.fd @@ -1 +1 @@ -Subproject commit 856a42076a65256aaae9b29f4891532cb4a3fbca +Subproject commit 4848ecbb5e713b16127433e11f7d3edc6ac784c4 diff --git a/sorc/ufs_utils.fd b/sorc/ufs_utils.fd index 06eec5b6f6..2323761084 160000 --- a/sorc/ufs_utils.fd +++ b/sorc/ufs_utils.fd @@ -1 +1 @@ -Subproject commit 06eec5b6f636123835e2dfd9fc5229980c006735 +Subproject commit 23237610845c3a4438b21b25e9b3dc25c4c15b73 diff --git a/versions/build.noaacloud.ver b/versions/build.noaacloud.ver index fc288b76b5..b5fd272b4b 100644 --- a/versions/build.noaacloud.ver +++ 
b/versions/build.noaacloud.ver @@ -1,5 +1,5 @@ -export stack_intel_ver=2021.3.0 -export stack_impi_ver=2021.3.0 +export stack_intel_ver=2021.10.0 +export stack_impi_ver=2021.10.0 export spack_env=gsi-addon-env source "${HOMEgfs:-}/versions/spack.ver" -export spack_mod_path="/contrib/spack-stack/spack-stack-${spack_stack_ver}/envs/gsi-addon-env/install/modulefiles/Core" +export spack_mod_path="/contrib/spack-stack-rocky8/spack-stack-${spack_stack_ver}/envs/gsi-addon-env/install/modulefiles/Core" diff --git a/versions/run.noaacloud.ver b/versions/run.noaacloud.ver index 1fc3779b2e..98ec2b36f9 100644 --- a/versions/run.noaacloud.ver +++ b/versions/run.noaacloud.ver @@ -1,8 +1,8 @@ -export stack_intel_ver=2021.3.0 -export stack_impi_ver=2021.3.0 +export stack_intel_ver=2021.10.0 +export stack_impi_ver=2021.10.0 export spack_env=gsi-addon-env source "${HOMEgfs:-}/versions/spack.ver" -export spack_mod_path="/contrib/spack-stack/spack-stack-${spack_stack_ver}/envs/gsi-addon-env/install/modulefiles/Core" +export spack_mod_path="/contrib/spack-stack-rocky8/spack-stack-${spack_stack_ver}/envs/gsi-addon-env/install/modulefiles/Core" export cdo_ver=2.2.0 diff --git a/workflow/hosts/awspw.yaml b/workflow/hosts/awspw.yaml index b98c838faa..c80800725a 100644 --- a/workflow/hosts/awspw.yaml +++ b/workflow/hosts/awspw.yaml @@ -27,5 +27,5 @@ MAKE_ACFTBUFR: 'NO' DO_TRACKER: 'NO' DO_GENESIS: 'NO' DO_METP: 'NO' -SUPPORT_WAVES: 'NO' -SUPPORTED_RESOLUTIONS: ['C48', 'C96'] # TODO: Test and support all cubed-sphere resolutions. +SUPPORTED_RESOLUTIONS: ['C48', 'C96', 'C192', 'C384', 'C768'] # TODO: Test and support all cubed-sphere resolutions. +AERO_INPUTS_DIR: /contrib/global-workflow-shared-data/data/gocart_emissions diff --git a/workflow/hosts/azurepw.yaml b/workflow/hosts/azurepw.yaml index 4725e28962..d7c064dc60 100644 --- a/workflow/hosts/azurepw.yaml +++ b/workflow/hosts/azurepw.yaml @@ -24,5 +24,7 @@ LOCALARCH: 'NO' ATARDIR: '' # TODO: This will not yet work from AZURE. MAKE_NSSTBUFR: 'NO' MAKE_ACFTBUFR: 'NO' -SUPPORT_WAVES: 'NO' -SUPPORTED_RESOLUTIONS: ['C48', 'C96'] # TODO: Test and support all cubed-sphere resolutions. +DO_TRACKER: 'NO' +DO_GENESIS: 'NO' +DO_METP: 'NO' +SUPPORTED_RESOLUTIONS: ['C48', 'C96', 'C384', 'C768'] # TODO: Test and support all cubed-sphere resolutions. diff --git a/workflow/hosts/googlepw.yaml b/workflow/hosts/googlepw.yaml index 1b979b6bc9..8ba8e18e74 100644 --- a/workflow/hosts/googlepw.yaml +++ b/workflow/hosts/googlepw.yaml @@ -24,5 +24,7 @@ LOCALARCH: 'NO' ATARDIR: '' # TODO: This will not yet work from GOOGLE. MAKE_NSSTBUFR: 'NO' MAKE_ACFTBUFR: 'NO' -SUPPORT_WAVES: 'NO' -SUPPORTED_RESOLUTIONS: ['C48', 'C96'] # TODO: Test and support all cubed-sphere resolutions. +DO_TRACKER: 'NO' +DO_GENESIS: 'NO' +DO_METP: 'NO' +SUPPORTED_RESOLUTIONS: ['C48', 'C96', 'C384'] # TODO: Test and support all cubed-sphere resolutions. 
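
[Editor's note] With `SUPPORT_WAVES` dropped from the host files above, resolution support becomes the only host-level gate left in `validate_user_request` (next hunk). A minimal sketch of the surviving check, assuming a host dict shaped like the YAML above (`validate_resolutions` is a hypothetical stand-in):

```python
host_info = {"SUPPORTED_RESOLUTIONS": ["C48", "C96", "C384"]}  # e.g. Google PW above


def validate_resolutions(host_info: dict, machine: str, **res_args: str) -> None:
    """Raise if any requested resolution is unsupported on this host (sketch)."""
    supp_res = host_info["SUPPORTED_RESOLUTIONS"]
    for attr, expt_res in res_args.items():
        if expt_res not in supp_res:
            raise NotImplementedError(f"Supported resolutions on {machine} are:\n{', '.join(supp_res)}")


validate_resolutions(host_info, "GOOGLEPW", resdetatmos="C96", resensatmos="C48")
```
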
diff --git a/workflow/setup_expt.py b/workflow/setup_expt.py index 574dc0d91a..09bc1c90ac 100755 --- a/workflow/setup_expt.py +++ b/workflow/setup_expt.py @@ -372,7 +372,6 @@ def query_and_clean(dirname, force_clean=False): def validate_user_request(host, inputs): supp_res = host.info['SUPPORTED_RESOLUTIONS'] - supp_waves = host.info.get('SUPPORT_WAVES', 'YES') machine = host.machine for attr in ['resdetatmos', 'resensatmos']: try: @@ -382,9 +381,6 @@ def validate_user_request(host, inputs): if expt_res not in supp_res: raise NotImplementedError(f"Supported resolutions on {machine} are:\n{', '.join(supp_res)}") - if "W" in inputs.app and supp_waves == "NO": - raise NotImplementedError(f"Waves are not supported on {machine}") - def get_ocean_resolution(resdetatmos): """ From d85214db1683848019d67815c63dc663c6049ec5 Mon Sep 17 00:00:00 2001 From: David Huber <69919478+DavidHuber-NOAA@users.noreply.github.com> Date: Tue, 24 Dec 2024 17:48:19 -0500 Subject: [PATCH 05/33] Create compute build option (#3186) This creates scripts to run compute-node builds and also refactors the build_all.sh script to make it easier to build all executables. In place of various options to control what components are built when using `build_all.sh`, instead it takes in a list of one or more systems to build: - `gfs` builds everything needed for forecast-only gfs (UFS model with unstructured wave grid, gfs_utils, ufs_utils, upp, ww3 pre/post for unstructured wave grid) - `gefs` builds everything needed for GEFS (UFS model with structured wave grid, gfs_utils, ufs_utils, upp, ww3 pre/post for structured wave grid) - `sfs` builds everything needed SFS (UFS model in hydrostatic mode with unstructured wave grid, gfs_utils, ufs_utils, upp, ww3 pre/post for structured wave grid) - `gsi` builds GSI-based DA components (gsi_enkf, gsi_monitor, gsi_utils) - `gdas` builds JEDI-based DA components (gdas app, gsi_monitor, gsi_utils) `all` will build all of the above (mostly for testing) Examples: Build for forecast-only GFS: ```./build_all.sh gfs``` Build cycled GFS including coupled DA: ``` ./build_all.sh gfs gsi gdas``` Build GEFS: ```./build_all.sh gefs``` Build everything (for testing purposes): ```./build_all.sh all``` Other options, such as `-d` to build in debug mode, remain unchanged. The full script signature is now: ``` ./build_all.sh [-a UFS_app][-c build_config][-d][-f][-h][-v] [gfs] [gefs] [sfs] [gsi] [gdas] [all] ``` Additionally, there is a new script to build components on the compute nodes using the job scheduler instead of the login node. This method takes the load off of the login nodes and may be faster in some cases. Compute build is invoked using the build_compute.sh script, which behaves similarly to the new `build_all.sh:` ``` ./build_compute.sh [-h][-v][-A ] [gfs] [gefs] [sfs] [gsi] [gdas] [all] ``` Compute build will generate a rocoto workflow and then call `rocotorun` itself repeatedly until either a build fails or all builds succeed, at which point the script will exit. Since the script is calling `rocotorun` itself, you don't need to set up your own cron to do it, but advanced users can also use all the regular rocoto tools on `build.xml` and `build.db` if you wish. Some things to note with the compute build: - When a build fails, other build jobs are not cancelled and will continue to run. - Since the script stops running `rocotorun` once one build fails, the rocoto database will no longer update with the status of the remaining jobs after that point. 
- Similarly, if the terminal running `build_compute.sh` gets disconnected, the rocoto database will no longer update. - In either of the above cases, you could run `rocotorun` yourself manually to update the database as long as the job information hasn't aged off the scheduler yet. Resolves #3131 --------- Co-authored-by: Rahul Mahajan --- .github/CODEOWNERS | 1 + .gitignore | 3 + ci/Jenkinsfile | 4 +- docs/source/clone.rst | 56 +++------ sorc/build_all.sh | 211 ++++++++++++++++----------------- sorc/build_compute.sh | 115 ++++++++++++++++++ sorc/build_upp.sh | 25 ++++ workflow/build_compute.py | 178 +++++++++++++++++++++++++++ workflow/build_opts.yaml | 94 +++++++++++++++ workflow/generate_workflows.sh | 47 ++------ 10 files changed, 548 insertions(+), 186 deletions(-) create mode 100755 sorc/build_compute.sh create mode 100755 workflow/build_compute.py create mode 100644 workflow/build_opts.yaml diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5068b961f7..b0b51922c5 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -211,3 +211,4 @@ ush/python/pygfs/utils/marine_da_utils.py @guillaumevernieres @AndrewEichmann-NO # Specific workflow scripts workflow/generate_workflows.sh @DavidHuber-NOAA +workflow/build_compute.py @DavidHuber-NOAA @aerorahul diff --git a/.gitignore b/.gitignore index 49fb3f438a..f3cb1e1b3e 100644 --- a/.gitignore +++ b/.gitignore @@ -85,6 +85,9 @@ parm/wafs # Ignore sorc and logs folders from externals #-------------------------------------------- +sorc/build.xml +sorc/build.db +sorc/build_lock.db sorc/*log sorc/logs sorc/calc_analysis.fd diff --git a/ci/Jenkinsfile b/ci/Jenkinsfile index 9e2381268d..b7a29e15b0 100644 --- a/ci/Jenkinsfile +++ b/ci/Jenkinsfile @@ -120,9 +120,7 @@ pipeline { def error_logs_message = "" dir("${HOMEgfs}/sorc") { try { - sh(script: './build_all.sh -kgu') // build the global-workflow executables for GFS variant (UFS-wx-model, WW3 pre/post executables) - sh(script: './build_ww3prepost.sh -w > ./logs/build_ww3prepost_gefs.log 2>&1') // build the WW3 pre/post processing executables for GEFS variant - sh(script: './build_ufs.sh -w -e gefs_model.x > ./logs/build_ufs_gefs.log 2>&1') // build the UFS-wx-model executable for GEFS variant + sh(script: './build_compute.sh all') // build the global-workflow executables } catch (Exception error_build) { echo "Failed to build global-workflow: ${error_build.getMessage()}" if ( fileExists("logs/error.logs") ) { diff --git a/docs/source/clone.rst b/docs/source/clone.rst index d3f81f2e47..ec0018157a 100644 --- a/docs/source/clone.rst +++ b/docs/source/clone.rst @@ -18,35 +18,39 @@ Clone the `global-workflow` and `cd` into the `sorc` directory: git clone --recursive https://github.com/NOAA-EMC/global-workflow cd global-workflow/sorc -For forecast-only (coupled or uncoupled) build of the components: +.. _build_examples: + +The build_all.sh script can be used to build all required components of the global workflow. The accepted argument is a list of systems to be built. This includes builds for GFS and GEFS forecast-only experiments, as well as GSI- and GDASApp-based DA for cycled GFS experiments. See `feature availability `__ for which system(s) are available on each supported machine. 
:: - ./build_all.sh + ./build_all.sh [gfs] [gefs] [gsi] [gdas] [all] -For cycled (w/ data assimilation) use the `-g` option during build: +For example, to run GFS experiments with GSI DA, execute: :: - ./build_all.sh -g + ./build_all.sh gfs gsi -For coupled cycling (include new UFSDA) use the `-gu` options during build: +This builds the GFS, UFS-utils, GFS-utils, WW3 with PDLIB (unstructured wave grids), UPP, GSI, GSI-monitor, and GSI-utils executables. -[Currently only available on Hera, Orion, and Hercules] +For coupled cycling (include new UFSDA) execute: :: - ./build_all.sh -gu + ./build_all.sh gfs gdas +This builds all of the same executables, except it builds the GDASApp instead of the GSI. -For building without PDLIB (unstructured grid) for the wave model, use the `-w` options during build: +To run GEFS (forecast-only) execute: :: - ./build_all.sh -w + ./build_all.sh gefs +This builds the GEFS, UFS-utils, GFS-utils, WW3 *without* PDLIB (structured wave grids), and UPP executables. -Build workflow components and link workflow artifacts such as executables, etc. +Once the building is complete, link workflow artifacts such as executables, configuration files, and scripts via :: @@ -107,40 +111,19 @@ Under the ``/sorc`` folder is a script to build all components called ``build_al :: - ./build_all.sh [-a UFS_app][-g][-h][-u][-v] + ./build_all.sh [-a UFS_app][-k][-h][-v] [list of system(s) to build] -a UFS_app: Build a specific UFS app instead of the default - -g: - Build GSI + -k: + Kill all builds immediately if one fails -h: Print this help message and exit - -j: - Specify maximum number of build jobs (n) - -u: - Build UFS-DA -v: Execute all build scripts with -v option to turn on verbose where supported -For forecast-only (coupled or uncoupled) build of the components: - -:: - - ./build_all.sh - -For cycled (w/ data assimilation) use the `-g` option during build: - -:: - - ./build_all.sh -g - -For coupled cycling (include new UFSDA) use the `-gu` options during build: - -[Currently only available on Hera, Orion, and Hercules] - -:: - - ./build_all.sh -gu + Lastly, pass to build_all.sh a list of systems to build. This includes `gfs`, `gefs`, `sfs` (not fully supported), `gsi`, `gdas`, and `all`. +For examples of how to use this script, see :ref:`build examples `. ^^^^^^^^^^^^^^^ Link components ^^^^^^^^^^^^^^^ @@ -156,4 +139,3 @@ After running the checkout and build scripts run the link script: Where: ``-o``: Run in operations (NCO) mode. This creates copies instead of using symlinks and is generally only used by NCO during installation into production. - diff --git a/sorc/build_all.sh b/sorc/build_all.sh index 9414846f2a..f4618b948c 100755 --- a/sorc/build_all.sh +++ b/sorc/build_all.sh @@ -13,32 +13,23 @@ set +x #------------------------------------ function _usage() { cat << EOF -Builds all of the global-workflow components by calling the individual build - scripts in sequence. +Builds all of the global-workflow components by calling the individual build scripts in parallel. -Usage: ${BASH_SOURCE[0]} [-a UFS_app][-c build_config][-d][-f][-h][-j n][-v][-w][-y] +Usage: ${BASH_SOURCE[0]} [-a UFS_app][-c build_config][-d][-f][-h][-v] [gfs] [gefs] [sfs] [gsi] [gdas] [all] -a UFS_app: - Build a specific UFS app instead of the default + Build a specific UFS app instead of the default. This will be applied to all UFS (GFS, GEFS, SFS) builds. -d: Build in debug mode -f: - Build the UFS model using the -DFASTER=ON option - -g: - Build GSI + Build the UFS model(s) using the -DFASTER=ON option. 
-h: Print this help message and exit - -j: - Specify maximum number of build jobs (n) -k: Kill all builds if any build fails - -u: - Build UFS-DA -v: Execute all build scripts with -v option to turn on verbose where supported - -w: - Use structured wave grid - -y: - Use hydrostatic version of FV3 + + Specified systems (gfs, gefs, sfs, gsi, gdas) are non-exclusive, so they can be built together. EOF exit 1 } @@ -48,30 +39,21 @@ readonly HOMEgfs=$(cd "$(dirname "$(readlink -f -n "${BASH_SOURCE[0]}" )" )/.." cd "${HOMEgfs}/sorc" || exit 1 _build_ufs_opt="" -_build_ufsda="NO" -_build_gsi="NO" _build_debug="" _verbose_opt="" -_wave_opt="" -_hydro_opt="" _build_job_max=20 _quick_kill="NO" _ufs_exec="-e gfs_model.x" # Reset option counter in case this script is sourced OPTIND=1 -while getopts ":a:dfghj:kuvwy" option; do +while getopts ":a:dfhkv" option; do case "${option}" in a) _build_ufs_opt+="-a ${OPTARG} ";; f) _build_ufs_opt+="-f ";; d) _build_debug="-d" ;; - g) _build_gsi="YES" ;; h) _usage;; - j) _build_job_max="${OPTARG} ";; k) _quick_kill="YES" ;; - u) _build_ufsda="YES" ;; - v) _verbose_opt="-v";; - w) _wave_opt="-w"; _ufs_exec="-e gefs_model.x";; - y) _hydro_opt="-y"; _ufs_exec="-e sfs_model.x";; + v) _verbose_opt="-v" ;; :) echo "[${BASH_SOURCE[0]}]: ${option} requires an argument" _usage @@ -82,20 +64,91 @@ while getopts ":a:dfghj:kuvwy" option; do ;; esac done - shift $((OPTIND-1)) +# If no build system was specified, build for gfs forecast-only +if [[ $# -eq 0 ]]; then + selected_systems="gfs" +else + selected_systems="$*" +fi + +supported_systems=("gfs" "gefs" "sfs" "gsi" "gdas" "all") + +declare -A system_builds +system_builds=( + ["gfs"]="ufs_gfs gfs_utils ufs_utils upp ww3_gfs" + ["gefs"]="ufs_gefs gfs_utils ufs_utils upp ww3_gefs" + ["sfs"]="ufs_sfs gfs_utils ufs_utils upp ww3_gefs" + ["gsi"]="gsi_enkf gsi_monitor gsi_utils" + ["gdas"]="gdas gsi_monitor gsi_utils" + ["all"]="ufs_gfs gfs_utils ufs_utils upp ww3_gfs ufs_gefs ufs_sfs ww3_gefs gdas gsi_enkf gsi_monitor gsi_utils" +) + logs_dir="${HOMEgfs}/sorc/logs" if [[ ! -d "${logs_dir}" ]]; then echo "Creating logs folder" mkdir -p "${logs_dir}" || exit 1 fi -# Check final exec folder exists -if [[ ! 
-d "${HOMEgfs}/exec" ]]; then - echo "Creating ${HOMEgfs}/exec folder" - mkdir -p "${HOMEgfs}/exec" -fi +# Jobs per build ("min max") +declare -A build_jobs build_opts build_scripts +build_jobs=( + ["ufs_gfs"]=8 ["ufs_gefs"]=8 ["ufs_sfs"]=8 ["gdas"]=8 ["gsi_enkf"]=2 ["gfs_utils"]=1 ["ufs_utils"]=1 + ["ww3_gfs"]=1 ["ww3_gefs"]=1 ["gsi_utils"]=1 ["gsi_monitor"]=1 ["gfs_utils"]=1 ["upp"]=1 +) + +# Establish build options for each job +_gfs_exec="gfs_model.x" +_gefs_exec="gefs_model.x" +_sfs_exec="sfs_model.x" +build_opts=( + ["ufs_gfs"]="${wave_opt} ${_build_ufs_opt} ${_verbose_opt} ${_build_debug} -e ${_gfs_exec}" + ["ufs_gefs"]="${wave_opt} ${_build_ufs_opt} ${_verbose_opt} ${_build_debug} -e ${_gefs_exec}" + ["ufs_sfs"]="${wave_opt} ${_build_ufs_opt} ${_verbose_opt} ${_build_debug} -e ${_sfs_exec}" + ["upp"]="${_build_debug}" + ["ww3_gfs"]="${_verbose_opt} ${_build_debug}" + ["ww3_gefs"]="-w ${_verbose_opt} ${_build_debug}" + ["gdas"]="${_verbose_opt} ${_build_debug}" + ["ufs_utils"]="${_verbose_opt} ${_build_debug}" + ["gfs_utils"]="${_verbose_opt} ${_build_debug}" + ["gsi_utils"]="${_verbose_opt} ${_build_debug}" + ["gsi_enkf"]="${_verbose_opt} ${_build_debug}" + ["gsi_monitor"]="${_verbose_opt} ${_build_debug}" +) + +# Set the build script name for each build +build_scripts=( + ["ufs_gfs"]="build_ufs.sh" + ["ufs_gefs"]="build_ufs.sh" + ["ufs_sfs"]="build_ufs.sh" + ["gdas"]="build_gdas.sh" + ["gsi_enkf"]="build_gsi_enkf.sh" + ["gfs_utils"]="build_gfs_utils.sh" + ["ufs_utils"]="build_ufs_utils.sh" + ["ww3_gfs"]="build_ww3prepost.sh" + ["ww3_gefs"]="build_ww3prepost.sh" + ["gsi_utils"]="build_gsi_utils.sh" + ["gsi_monitor"]="build_gsi_monitor.sh" + ["gfs_utils"]="build_gfs_utils.sh" + ["upp"]="build_upp.sh" +) + +# Check the requested systems to make sure we can build them +declare -A builds +system_count=0 +for system in ${selected_systems}; do + # shellcheck disable=SC2076 + if [[ " ${supported_systems[*]} " =~ " ${system} " ]]; then + (( system_count += 1 )) + for build in ${system_builds["${system}"]}; do + builds["${build}"]="yes" + done + else + echo "Unsupported build system: ${system}" + _usage + fi +done #------------------------------------ # GET MACHINE @@ -108,6 +161,9 @@ if [[ -z "${MACHINE_ID}" ]]; then exit 1 fi +# Create the log directory +mkdir -p "${HOMEgfs}/sorc/logs" + #------------------------------------ # SOURCE BUILD VERSION FILES #------------------------------------ @@ -123,87 +179,18 @@ ERRSCRIPT=${ERRSCRIPT:-'eval [[ $errs = 0 ]]'} # shellcheck disable= errs=0 -declare -A build_jobs -declare -A build_opts - #------------------------------------ # Check which builds to do and assign # of build jobs #------------------------------------ -# Mandatory builds, unless otherwise specified, for the UFS -big_jobs=0 -build_jobs["ufs"]=8 -big_jobs=$((big_jobs+1)) -build_opts["ufs"]="${_wave_opt} ${_hydro_opt} ${_verbose_opt} ${_build_ufs_opt} ${_build_debug} ${_ufs_exec}" - -build_jobs["upp"]=1 -build_opts["upp"]="${_build_debug}" - -build_jobs["ufs_utils"]=1 -build_opts["ufs_utils"]="${_verbose_opt} ${_build_debug}" - -build_jobs["gfs_utils"]=1 -build_opts["gfs_utils"]="${_verbose_opt} ${_build_debug}" - -build_jobs["ww3prepost"]=1 -build_opts["ww3prepost"]="${_wave_opt} ${_verbose_opt} ${_build_ufs_opt} ${_build_debug}" - -# Optional DA builds -if [[ "${_build_ufsda}" == "YES" ]]; then - if [[ "${MACHINE_ID}" != "orion" && "${MACHINE_ID}" != "hera" && "${MACHINE_ID}" != "hercules" && "${MACHINE_ID}" != "wcoss2" && "${MACHINE_ID}" != "noaacloud" && "${MACHINE_ID}" != 
"gaea" ]]; then - echo "NOTE: The GDAS App is not supported on ${MACHINE_ID}. Disabling build." - else - build_jobs["gdas"]=8 - big_jobs=$((big_jobs+1)) - build_opts["gdas"]="${_verbose_opt} ${_build_debug}" - fi -fi -if [[ "${_build_gsi}" == "YES" ]]; then - build_jobs["gsi_enkf"]=2 - build_opts["gsi_enkf"]="${_verbose_opt} ${_build_debug}" -fi -if [[ "${_build_gsi}" == "YES" || "${_build_ufsda}" == "YES" ]] ; then - build_jobs["gsi_utils"]=1 - build_opts["gsi_utils"]="${_verbose_opt} ${_build_debug}" - build_jobs["gsi_monitor"]=1 - build_opts["gsi_monitor"]="${_verbose_opt} ${_build_debug}" -fi - -# Go through all builds and adjust CPU counts down if necessary -requested_cpus=0 -build_list="" -for build in "${!build_jobs[@]}"; do - if [[ -z "${build_list}" ]]; then - build_list="${build}" - else - build_list="${build_list}, ${build}" - fi - if [[ ${build_jobs[${build}]} -gt ${_build_job_max} ]]; then - build_jobs[${build}]=${_build_job_max} - fi - requested_cpus=$(( requested_cpus + build_jobs[${build}] )) -done - echo "Building ${build_list}" -# Go through all builds and adjust CPU counts up if possible -if [[ ${requested_cpus} -lt ${_build_job_max} && ${big_jobs} -gt 0 ]]; then - # Add cores to the gdas and ufs build jobs - extra_cores=$(( _build_job_max - requested_cpus )) - extra_cores=$(( extra_cores / big_jobs )) - for build in "${!build_jobs[@]}"; do - if [[ "${build}" == "gdas" || "${build}" == "ufs" ]]; then - build_jobs[${build}]=$(( build_jobs[${build}] + extra_cores )) - fi - done -fi - procs_in_use=0 declare -A build_ids check_builds() { - for chk_build in "${!build_jobs[@]}"; do + for chk_build in "${!builds[@]}"; do # Check if the build is complete and if so what the status was if [[ -n "${build_ids[${chk_build}]+0}" ]]; then if ! ps -p "${build_ids[${chk_build}]}" > /dev/null; then @@ -213,7 +200,7 @@ check_builds() echo "build_${chk_build}.sh failed! Exiting!" echo "Check logs/build_${chk_build}.log for details." echo "logs/build_${chk_build}.log" > "${HOMEgfs}/sorc/logs/error.logs" - for kill_build in "${!build_jobs[@]}"; do + for kill_build in "${!builds[@]}"; do if [[ -n "${build_ids[${kill_build}]+0}" ]]; then pkill -P "${build_ids[${kill_build}]}" fi @@ -228,15 +215,15 @@ check_builds() builds_started=0 # Now start looping through all of the jobs until everything is done -while [[ ${builds_started} -lt ${#build_jobs[@]} ]]; do - for build in "${!build_jobs[@]}"; do +while [[ ${builds_started} -lt ${#builds[@]} ]]; do + for build in "${!builds[@]}"; do # Has the job started? if [[ -n "${build_jobs[${build}]+0}" && -z "${build_ids[${build}]+0}" ]]; then # Do we have enough processors to run it? if [[ ${_build_job_max} -ge $(( build_jobs[build] + procs_in_use )) ]]; then # double-quoting build_opts here will not work since it is a string of options #shellcheck disable=SC2086 - "./build_${build}.sh" ${build_opts[${build}]:-} -j "${build_jobs[${build}]}" > \ + "./${build_scripts[${build}]}" ${build_opts[${build}]:-} -j "${build_jobs[${build}]}" > \ "${logs_dir}/build_${build}.log" 2>&1 & build_ids["${build}"]=$! echo "Starting build_${build}.sh" @@ -249,7 +236,7 @@ while [[ ${builds_started} -lt ${#build_jobs[@]} ]]; do # Also recalculate how many processors are in use to account for completed builds builds_started=0 procs_in_use=0 - for build in "${!build_jobs[@]}"; do + for build in "${!builds[@]}"; do # Has the build started? 
if [[ -n "${build_ids[${build}]+0}" ]]; then builds_started=$(( builds_started + 1)) @@ -275,7 +262,7 @@ done # Wait for all jobs to complete and check return statuses -while [[ "${#build_jobs[@]}" -gt 0 ]]; do +while [[ "${#builds[@]}" -gt 0 ]]; do # If requested, check if any build has failed and exit if so if [[ "${_quick_kill}" == "YES" ]]; then @@ -286,7 +273,7 @@ while [[ "${#build_jobs[@]}" -gt 0 ]]; do fi fi - for build in "${!build_jobs[@]}"; do + for build in "${!builds[@]}"; do # Test if each job is complete and if so, notify and remove from the array if [[ -n "${build_ids[${build}]+0}" ]]; then if ! ps -p "${build_ids[${build}]}" > /dev/null; then @@ -294,14 +281,14 @@ while [[ "${#build_jobs[@]}" -gt 0 ]]; do build_stat=$? errs=$((errs+build_stat)) if [[ ${build_stat} == 0 ]]; then - echo "build_${build}.sh completed successfully!" + echo "${build_scripts[${build}]} completed successfully!" else - echo "build_${build}.sh failed with status ${build_stat}!" + echo "${build_scripts[${build}]} failed with status ${build_stat}!" fi # Remove the completed build from the list of PIDs unset 'build_ids[${build}]' - unset 'build_jobs[${build}]' + unset 'builds[${build}]' fi fi done diff --git a/sorc/build_compute.sh b/sorc/build_compute.sh new file mode 100755 index 0000000000..794b4fa350 --- /dev/null +++ b/sorc/build_compute.sh @@ -0,0 +1,115 @@ +#!/usr/bin/env bash + +function _usage() { + cat << EOF +Builds all of the global-workflow components on compute nodes. + +Usage: ${BASH_SOURCE[0]} [-h][-v][-A ] [ gfs gefs sfs gsi gdas all] + -h: + Print this help message and exit + -v: + Verbose mode + -A: + HPC account to use for the compute-node builds + (default is \$HOMEgfs/ci/platforms/config.\$machine:\$HPC_ACCOUNT) + + Input arguments are the system(s) to build. + Valid options are + "gfs", "gefs", "sfs", "gsi", "gdas", or "all". + (default is "gfs") +EOF + exit 1 +} +# This script launches compute-node builds of selected submodules +# Two positional arguments are accepted: + +set -eu + +rocoto_verbose_opt="" +verbose="NO" +build_xml="build.xml" +build_db="build.db" +build_lock_db="build_lock.db" + +OPTIND=1 +while getopts ":hA:v" option; do + case "${option}" in + h) _usage;; + A) export HPC_ACCOUNT="${OPTARG}" ;; + v) verbose="YES" && rocoto_verbose_opt="-v10";; + :) + echo "[${BASH_SOURCE[0]}]: ${option} requires an argument" + _usage + ;; + *) + echo "[${BASH_SOURCE[0]}]: Unrecognized option: ${option}" + _usage + ;; + esac +done +shift $((OPTIND-1)) + +# Set build system to gfs if not specified +if [[ $# -eq 0 ]]; then + systems="gfs" +else + systems=$* +fi + +if [[ "${verbose}" == "YES" ]]; then + set -x +fi + +# shellcheck disable=SC2155,SC2312 +HOMEgfs=$(cd "$(dirname "$(readlink -f -n "${BASH_SOURCE[0]}" )" )/.." && pwd -P) +cd "${HOMEgfs}/sorc" || exit 1 + +# Delete the rocoto XML and database if they exist +rm -f "${build_xml}" "${build_db}" "${build_lock_db}" + +echo "Sourcing global-workflow modules ..." +source "${HOMEgfs}/workflow/gw_setup.sh" + +echo "Generating build.xml for building global-workflow programs on compute nodes ..." +# Catch errors manually from here out +set +e +"${HOMEgfs}/workflow/build_compute.py" --yaml "${HOMEgfs}/workflow/build_opts.yaml" --systems "${systems}" +rc=$? +if (( rc != 0 )); then + echo "FATAL ERROR: ${BASH_SOURCE[0]} failed to create 'build.xml' with error code ${rc}" + exit 1 +fi + +echo "Launching builds in parallel on compute nodes ..." 
+runcmd="rocotorun -w ${build_xml} -d ${build_db} ${rocoto_verbose_opt}" + +finished=false +${runcmd} +echo "Running builds on compute nodes" +while [[ "${finished}" == "false" ]]; do + sleep 3m + ${runcmd} + state="$("${HOMEgfs}/ci/scripts/utils/rocotostat.py" -w "${build_xml}" -d "${build_db}")" + if [[ "${verbose_opt}" == "true" ]]; then + echo "Rocoto is in state ${state}" + else + echo -n "." + fi + + if [[ "${state}" == "DONE" ]]; then + finished=true + elif [[ "${state}" == "RUNNING" ]]; then + finished=false + elif [[ "${state}" == "DEAD" ]]; then + echo "FATAL ERROR: ${BASH_SOURCE[0]} one or more builds failed!" + # TODO add capability to determine which build(s) failed + exit 2 + else + echo "FATAL ERROR: ${BASH_SOURCE[0]} rocoto failed with state '${state}'" + exit 3 + fi +done + +echo "All builds completed successfully!" + +exit 0 diff --git a/sorc/build_upp.sh b/sorc/build_upp.sh index e217e171db..15e2dfb146 100755 --- a/sorc/build_upp.sh +++ b/sorc/build_upp.sh @@ -26,6 +26,31 @@ if [[ ! -d "../exec" ]]; then mkdir -p ../exec fi +# The UPP does not load a cmake module and the WCOSS2 compute nodes do not have cmake in PATH by default +# Add cmake to the default modules if the command isn't found +# TODO remove this workaround when issue NOAA-EMC/UPP#1106 is addressed. +if ! command -v cmake >& /dev/null; then + export COMPILER="intel" + if [[ -z ${HOMEgfs+x} ]]; then + # shellcheck disable=SC2155 + readonly HOMEgfs=$(cd "$(dirname "$(readlink -f -n "${BASH_SOURCE[0]}" )" )/.." && pwd -P) + fi + source "${HOMEgfs}/ush/detect_machine.sh" + if [[ "${MACHINE_ID}" == "wcoss2" ]]; then + set +x + module try-load cmake + + if module is-loaded cmake; then + LMOD_SYSTEM_DEFAULT_MODULES="${LMOD_SYSTEM_DEFAULT_MODULES} cmake" + echo "Added cmake to the default modules" + else + echo "FATAL ERROR Could not find cmake or a cmake module!" + exit 2 + fi + set -x + fi +fi + cd ufs_model.fd/FV3/upp/tests # shellcheck disable=SC2086 BUILD_JOBS=${BUILD_JOBS:-8} ./compile_upp.sh ${_opts} diff --git a/workflow/build_compute.py b/workflow/build_compute.py new file mode 100755 index 0000000000..7787e9ad40 --- /dev/null +++ b/workflow/build_compute.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 + +""" +Entry point for setting up a compute-node build +""" + +import os +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from typing import Dict + +from wxflow import parse_yaml, AttrDict + +from hosts import Host +import rocoto.rocoto as rocoto + + +_here = os.path.dirname(__file__) +HOMEgfs = os.path.abspath(os.path.join(os.path.abspath(_here), '..')) + + +def input_args(*argv): + """ + Method to collect user arguments for `compute_build.py` + """ + + description = """ + Setup files and directories to start a compute build. + """ + + parser = ArgumentParser(description=description, + formatter_class=ArgumentDefaultsHelpFormatter) + + parser.add_argument('--yaml', help='Input YAML file', + type=str, required=False, default='build_opts.yaml') + parser.add_argument('--account', help='HPC account to use; default is host-dependent', required=False, default=os.getenv('HPC_ACCOUNT')) + parser.add_argument('--systems', help='System(s) to build (options: gfs, gefs, sfs, gsi, gdas, or all)', required=False, default='gfs') + + inputs = parser.parse_args(list(*argv) if len(argv) else None) + + return inputs + + +def get_task_spec(task_name: str, task_spec: Dict, host_spec: Dict) -> Dict: + """ + Generate a task specification dictionary for a given task. 
+ + Parameters + ---------- + task_name: str + The name of the task. + task_spec: Dict + The specification of the task, containing command, walltime, and cores. + host_spec: Dict + The specification of the host, containing account, queue, partition, and native. + + Returns: + -------- + task_dict: Dict + A dictionary containing the task specification, including resources and other task-related information. + """ + + task_dict = AttrDict() + task_dict.task_name = task_name + task_dict.cycledef = "build" + task_dict.maxtries = 1 + task_dict.command = f"cd {HOMEgfs}/sorc/; {task_spec.command}" + task_dict.job_name = task_name + task_dict.log = f"{HOMEgfs}/sorc/logs/{task_name}.log" + + task_dict.resources = AttrDict() + task_dict.resources.account = host_spec.account + task_dict.resources.queue = host_spec.queue + task_dict.resources.partition = host_spec.partition + task_dict.resources.walltime = task_spec.walltime + task_dict.resources.native = host_spec.native + task_dict.resources.memory = None + task_dict.resources.nodes = 1 + task_dict.resources.ntasks = task_spec.cores + task_dict.resources.ppn = task_spec.cores + task_dict.resources.threads = 1 + + return task_dict + + +def get_host_specs(host: Dict) -> Dict: + """Generate host specs for the build.xml file based on Host() info + + Parameters + ---------- + host : Dict + Host information returned by Host() + + Returns + ------- + specs: Dict + Consolidated compute specifics needed for the XML + """ + + native = None + partition = None + + if host.info.SCHEDULER in ['pbspro']: + native = '-l place=vscatter' + elif host.info.SCHEDULER in ['slurm']: + native = '--export=NONE' + if host.info.PARTITION_BATCH not in [""]: + partition = host.info.PARTITION_BATCH + + if host.info.RESERVATION not in [""]: + native += f' --reservation={host.info.RESERVATION}' + + if host.info.CLUSTERS not in [""]: + native += f' --clusters={host.info.CLUSTERS}' + + specs = AttrDict() + specs.scheduler = host.info.SCHEDULER + specs.account = host.info.ACCOUNT + specs.queue = host.info.QUEUE + specs.partition = partition + specs.native = native + + return specs + + +def main(*argv): + + user_inputs = input_args(*argv) + host_specs = get_host_specs(Host()) + + # Update the default host account if the user supplied one + if user_inputs.account is not None: + host_specs.account = user_inputs.account + + build_specs = AttrDict(parse_yaml(user_inputs.yaml)) + + systems = user_inputs.systems.split() if "all" not in user_inputs.systems else ["all"] + + # Determine systems to build + builds = set() + if systems[0] == "all": + builds = build_specs.build + else: + builds.update(build_specs.systems["common"]) + try: + for system in systems: + builds.update(build_specs.systems[system]) + except KeyError as e: + raise KeyError(f"{system} is not a valid global-workflow system!") from e + + # Build the task specs from the build specs and host specs + task_specs = AttrDict() + for task_name, task_spec in build_specs.build.items(): + if task_name in builds: + task_specs[task_name] = get_task_spec(task_name, task_spec, host_specs) + + # Start building the XML + strings = ['', + '', + f'', + f'\t{HOMEgfs}/sorc/logs/build.log', + '\t190001010000 190001010000 24:00:00', + '\n'] + xml_header = '\n'.join(strings) + xml_footer = '\n\n' + + task_list = [] + for _, task_spec in task_specs.items(): + task_list.append(rocoto.create_task(task_spec)) + xml_tasks = '\n'.join(task_list) + + xml = ''.join([xml_header, xml_tasks, xml_footer]) + xml_file = f"{HOMEgfs}/sorc/build.xml" + with 
open(xml_file, 'w') as fh: + fh.write(xml) + + +if __name__ == '__main__': + main() diff --git a/workflow/build_opts.yaml b/workflow/build_opts.yaml new file mode 100644 index 0000000000..464701c2f3 --- /dev/null +++ b/workflow/build_opts.yaml @@ -0,0 +1,94 @@ +systems: + common: + - "ufs_utils" + - "gfs_utils" + - "upp" + gfs: + - "gfs_model" + - "gfs_ww3prepost" + gsi: + - "gsi_enkf" + - "gsi_utils" + - "gsi_monitor" + gdas: + - "gdas" + - "gsi_utils" + - "gsi_monitor" + gefs: + - "gefs_model" + - "gefs_ww3_prepost" + sfs: + - "sfs_model" + - "gefs_ww3_prepost" +build: + gfs_model: + command: "./build_ufs.sh -e gfs_model.x -j 12" + log: "build_ufs_gfs.log" + cores: 12 + walltime: "00:30:00" + + gfs_ww3prepost: + command: "./build_ww3prepost.sh -j 4" + log: "build_ww3prepost_gfs.log" + cores: 4 + walltime: "00:10:00" + + gefs_model: + command: "./build_ufs.sh -w -e gefs_model.x -j 12" + log: "build_ufs_gefs.log" + cores: 12 + walltime: "00:30:00" + + gefs_ww3_prepost: + command: "./build_ww3prepost.sh -w -j 4" + log: "build_ww3prepost_gefs.log" + cores: 4 + walltime: "00:10:00" + + sfs_model: + command: "./build_ufs.sh -y -e sfs_model.x -j 12" + log: "build_ufs_sfs.log" + cores: 12 + walltime: "00:30:00" + + upp: + command: "./build_upp.sh -j 8" + log: "build_upp.log" + cores: 8 + walltime: "00:10:00" + + gsi_enkf: + command: "./build_gsi_enkf.sh -j 8" + log: "build_gsi_enkf.log" + cores: 8 + walltime: "00:15:00" + + gsi_monitor: + command: "./build_gsi_monitor.sh -j 4" + log: "build_gsi_monitor.log" + cores: 4 + walltime: "00:10:00" + + gsi_utils: + command: "./build_gsi_utils.sh -j 6" + log: "build_gsi_utils.log" + cores: 6 + walltime: "00:10:00" + + ufs_utils: + command: "./build_ufs_utils.sh -j 8" + log: "build_ufs_utils.log" + cores: 8 + walltime: "00:10:00" + + gfs_utils: + command: "./build_gfs_utils.sh -j 6" + log: "build_gfs_utils.log" + cores: 6 + walltime: "00:10:00" + + gdas: + command: "./build_gdas.sh -j 12" + log: "build_gdas.log" + cores: 12 + walltime: "01:00:00" diff --git a/workflow/generate_workflows.sh b/workflow/generate_workflows.sh index c98fa3028a..a5615a8b0d 100755 --- a/workflow/generate_workflows.sh +++ b/workflow/generate_workflows.sh @@ -19,11 +19,6 @@ function _usage() { -b Run build_all.sh with default flags (build the UFS, UPP, UFS_Utils, and GFS-utils only - -B "build flags" - Run build_all.sh with the build specified flags. Refer to - build_all.sh -h for a list of valid flags. - NOTE: the list of build flags MUST be in quotes. - -u Update submodules before building and/or generating experiments. -y "list of YAMLs to run" @@ -37,13 +32,12 @@ function _usage() { -G Run all valid GFS cases in the specified YAML directory. If -b is specified, then "-g -u" (build the GSI and GDASApp) - will be passed to build_all.sh unless -B is also specified. + will be passed to build_all.sh. Note that these builds are disabled on some systems, which will result in a warning from build_all.sh. -E Run all valid GEFS cases in the specified YAML directory. - If -b is specified, then "-w" will be passed to build_all.sh - unless -B is also specified. + If -b is specified, then "-w" will be passed to build_all.sh. -S (Not yet supported!) Run all valid SFS cases in the specified YAML directory. 
@@ -91,7 +85,6 @@ HOMEgfs="" _specified_home=false _build=false _build_flags="" -_explicit_build_flags=false _update_submods=false declare -a _yaml_list=("C48_ATM") _specified_yaml_list=false @@ -126,7 +119,6 @@ while [[ $# -gt 0 && "$1" != "--" ]]; do fi ;; b) _build=true ;; - B) _build_flags="${OPTARG}" && _explicit_build_flags=true ;; u) _update_submods=true ;; y) # Start over with an empty _yaml_list declare -a _yaml_list=() @@ -231,18 +223,6 @@ else done fi -# Test if multiple "run_all" options were set -_count_run_alls=0 -[[ "${_run_all_gfs}" == "true" ]] && ((_count_run_alls+=1)) -[[ "${_run_all_gefs}" == "true" ]] && ((_count_run_alls+=1)) -[[ "${_run_all_sfs}" == "true" ]] && ((_count_run_alls+=1)) - -if (( _count_run_alls > 1 )) ; then - echo "Only one run all option (-G -E -S) may be specified" - echo "Rerun with just one option and/or with -h for usage examples" - exit 5 -fi - # If -S is specified, exit (for now). # TODO when SFS tests come online, enable this option. if [[ "${_run_all_sfs}" == "true" ]]; then @@ -277,7 +257,7 @@ function select_all_yamls() # Bash cannot return an array from a function and any edits are descoped at # the end of the function, so use a nameref instead. - local -n _nameref_yaml_list='_yaml_list' + local -n _nameref_yaml_list="${2}" if [[ "${_specified_yaml_list}" == false ]]; then # Start over with an empty _yaml_list @@ -328,21 +308,20 @@ EOM # Check if running all GEFS cases if [[ "${_run_all_gefs}" == "true" ]]; then # Append gefs to the list of systems passed to build_all.sh if -E was specified - if [[ "${_explicit_build_flags}" == "false" && "${_build}" == "true" ]]; then - _build_flags="-w" - fi + _build_flags="${_build_flags} gefs " - select_all_yamls "gefs" + declare -a _gefs_yaml_list + select_all_yamls "gefs" "_gefs_yaml_list" + _yaml_list=("${_yaml_list[@]}" "${_gefs_yaml_list[@]}") fi -# Check if running all SFS cases +# Check if running all GFS cases if [[ "${_run_all_gfs}" == "true" ]]; then - # Append -g -u to build_all.sh flags if -G was specified - if [[ "${_explicit_build_flags}" == "false" && "${_build}" == "true" ]]; then - _build_flags="-g -u" - fi + _build_flags="${_build_flags} gfs " - select_all_yamls "gfs" + declare -a _gfs_yaml_list + select_all_yamls "gfs" "_gfs_yaml_list" + _yaml_list=("${_yaml_list[@]}" "${_gfs_yaml_list[@]}") fi # Loading modules sometimes raises unassigned errors, so disable checks @@ -397,7 +376,7 @@ if [[ "${_build}" == "true" ]]; then printf "Building via build_all.sh %s\n\n" "${_build_flags}" # Let the output of build_all.sh go to stdout regardless of verbose options #shellcheck disable=SC2086,SC2248 - ${HOMEgfs}/sorc/build_all.sh ${_build_flags} ${_verbose_flag} + ${HOMEgfs}/sorc/build_all.sh ${_verbose_flag} ${_build_flags} fi # Link the workflow silently unless there's an error From bdc0e290797f0966656b80b255fc7225b46bcd1a Mon Sep 17 00:00:00 2001 From: Eric Sinsky - NOAA <48259628+EricSinsky-NOAA@users.noreply.github.com> Date: Wed, 25 Dec 2024 23:12:33 -0500 Subject: [PATCH 06/33] Fix mod_icec bug in atmos_prod (#3167) The purpose of this PR is to fix a bug that causes the ensstat task to occasionally fail with a segmentation fault. The segmentation fault occurs because undefined values are not skipped in the ensemble spread and mean calculations in the ensstat program. Undefined values were not being skipped because the variables containing them did not use a bitmap in the pgrb files. Ensstat expects undefined values to be bitmapped; otherwise it will not skip them. 
The undefined variables were not using bitmap because of a bug in the atmos_prod task, where the mod_icec function was not being skipped for grib2 files that did not contain the LAND and ICEC variables. In the [offline UPP](https://github.com/NOAA-EMC/UPP/blob/develop/ush/fv3gfs_dwn_nems.sh), the mod_icec functionality was executed only when the LAND and ICEC variables existed in the grib2 file. The same condition is applied to mod_icec in this PR. Resolves #3150 --- ush/interp_atmos_master.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ush/interp_atmos_master.sh b/ush/interp_atmos_master.sh index 4c4ee4b03c..3a3edc470b 100755 --- a/ush/interp_atmos_master.sh +++ b/ush/interp_atmos_master.sh @@ -53,7 +53,11 @@ export err=$?; err_chk # trim and mask for all grids for grid in "${grids[@]}"; do trim_rh "${output_file_prefix}_${grid}"; export err=$?; err_chk - mod_icec "${output_file_prefix}_${grid}"; export err=$?; err_chk + # shellcheck disable=SC2312 + var_count=$(${WGRIB2} "${output_file_prefix}_${grid}" -match "LAND|ICEC" |wc -l) + if [[ "${var_count}" -eq 2 ]]; then + mod_icec "${output_file_prefix}_${grid}"; export err=$?; err_chk + fi done exit 0 From 1c37f907ecaadd835580d5bb73a0eaf2a146f2fc Mon Sep 17 00:00:00 2001 From: Travis Elless <113720457+TravisElless-NOAA@users.noreply.github.com> Date: Wed, 25 Dec 2024 23:13:30 -0500 Subject: [PATCH 07/33] Remove early-cycle EnKF forecast (#3185) Currently GFS experiments with the early-cycle EnKF call the same jobs as the late-cycle. However, since the forecast portion of the early cycle is handled through the GEFS workflow, the forecast and post jobs are not needed in the GFS early-cycle EnKF. This PR removes the calls to the early-cycle EnKF forecast and post jobs in GFS experiments, and adds statements to the archive yamls to only search for forecast files during the late-cycle EnKF. 
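For illustration, the guard added throughout the archive templates reduces to a check on the run name; a minimal bash sketch of that logic (the forecast hours and file name below are hypothetical stand-ins for the templated entries):

```
#!/usr/bin/env bash
# Sketch only: list forecast history for archiving solely in the late-cycle EnKF.
RUN="enkfgfs"   # early-cycle EnKF; its forecasts come from the GEFS workflow
if [[ "${RUN}" == "enkfgdas" ]]; then
  for fhr in 3 6 9; do
    printf 'archive: enkfgdas.atmf%03d.nc\n' "${fhr}"
  done
else
  echo "skipping forecast archives for ${RUN}"
fi
```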
--- parm/archive/enkf.yaml.j2 | 4 ++++ parm/archive/enkf_grp.yaml.j2 | 2 ++ parm/archive/enkf_restartb_grp.yaml.j2 | 2 ++ scripts/exgdas_enkf_earc.py | 2 +- workflow/applications/gfs_cycled.py | 4 +++- workflow/rocoto/gfs_tasks.py | 5 ++++- 6 files changed, 16 insertions(+), 3 deletions(-) diff --git a/parm/archive/enkf.yaml.j2 b/parm/archive/enkf.yaml.j2 index 9f9ad296f8..12167198cb 100644 --- a/parm/archive/enkf.yaml.j2 +++ b/parm/archive/enkf.yaml.j2 @@ -3,6 +3,7 @@ enkf: target: "{{ ATARDIR }}/{{ cycle_YMDH }}/{{ RUN }}.tar" required: # Logs + {% if RUN == 'enkfgdas' %} {% for mem in range(1, nmem_ens + 1) %} - "logs/{{ cycle_YMDH }}/{{ RUN }}_fcst_mem{{ '%03d' % mem }}.log" {% endfor %} @@ -10,6 +11,7 @@ enkf: - "logs/{{ cycle_YMDH }}/{{ RUN }}_epos{{ '%03d' % (fhr - fhmin) }}.log" {% endfor %} - "logs/{{ cycle_YMDH }}/{{ RUN }}_echgres.log" + {% endif %} - "logs/{{ cycle_YMDH }}/{{ RUN }}_esfc.log" {% for grp in range(IAUFHRS | length) %} - "logs/{{ cycle_YMDH }}/{{ RUN }}_ecen{{ '%03d' % grp }}.log" @@ -37,6 +39,7 @@ enkf: {% endfor %} # Ensemble mean and spread + {% if RUN == 'enkfgdas' %} {% for fhr in range(3, fhmax + 1, 3) %} - "{{ COMIN_ATMOS_HISTORY_ENSSTAT | relpath(ROTDIR) }}/{{ head }}atmf{{ '%03d' % fhr }}.ensmean.nc" - "{{ COMIN_ATMOS_HISTORY_ENSSTAT | relpath(ROTDIR) }}/{{ head }}sfcf{{ '%03d' % fhr }}.ensmean.nc" @@ -44,6 +47,7 @@ enkf: - "{{ COMIN_ATMOS_HISTORY_ENSSTAT | relpath(ROTDIR) }}/{{ head }}atmf{{ '%03d' % fhr }}.ensspread.nc" {% endif %} {% endfor %} + {% endif %} # Ensemble mean state {% if not DO_JEDIATMENS %} diff --git a/parm/archive/enkf_grp.yaml.j2 b/parm/archive/enkf_grp.yaml.j2 index 933ca45caf..3b58bbb27d 100644 --- a/parm/archive/enkf_grp.yaml.j2 +++ b/parm/archive/enkf_grp.yaml.j2 @@ -10,12 +10,14 @@ enkf_grp: {% set COMIN_ATMOS_RESTART_MEM = COMIN_ATMOS_RESTART_MEM_list[imem] %} # Forecast data + {% if RUN == 'enkfgdas' %} {% for fhr in range(3, 10, 3) %} - "{{ COMIN_ATMOS_HISTORY_MEM | relpath(ROTDIR) }}/{{ head }}atmf{{ "%03d" % fhr }}.nc" {% endfor %} # Only store the 6-hour surface forecast - "{{ COMIN_ATMOS_HISTORY_MEM | relpath(ROTDIR) }}/{{ head }}sfcf006.nc" + {% endif %} # Store the individual member analysis data {% if not lobsdiag_forenkf %} diff --git a/parm/archive/enkf_restartb_grp.yaml.j2 b/parm/archive/enkf_restartb_grp.yaml.j2 index c7aaf6682e..50595a6bbf 100644 --- a/parm/archive/enkf_restartb_grp.yaml.j2 +++ b/parm/archive/enkf_restartb_grp.yaml.j2 @@ -22,6 +22,7 @@ enkf_restartb_grp: {% endfor %} # Now get the restart files. 
+ {% if RUN == 'enkfgdas' %} {% for r_time in range(restart_interval, fhmax + 1, restart_interval) %} {% set r_timedelta = (r_time | string + "H") | to_timedelta %} {% set r_dt = current_cycle | add_to_datetime(r_timedelta) %} @@ -38,3 +39,4 @@ enkf_restartb_grp: - "{{ COMIN_ATMOS_RESTART_MEM | relpath(ROTDIR) }}/{{ r_prefix }}.fv_core.res.nc" {% endfor %} {% endfor %} + {% endif %} diff --git a/scripts/exgdas_enkf_earc.py b/scripts/exgdas_enkf_earc.py index 535dd2ea37..107d541a41 100755 --- a/scripts/exgdas_enkf_earc.py +++ b/scripts/exgdas_enkf_earc.py @@ -28,7 +28,7 @@ def main(): 'DOHYBVAR', 'DOIAU_ENKF', 'IAU_OFFSET', 'DOIAU', 'DO_CA', 'DO_CALC_INCREMENT', 'assim_freq', 'ARCH_CYC', 'DO_JEDISNOWDA', 'ARCH_WARMICFREQ', 'ARCH_FCSTICFREQ', - 'IAUFHRS_ENKF', 'NET'] + 'IAUFHRS_ENKF', 'NET', 'NMEM_ENS_GFS'] archive_dict = AttrDict() for key in keys: diff --git a/workflow/applications/gfs_cycled.py b/workflow/applications/gfs_cycled.py index 543d7a9d8c..5ecfddf276 100644 --- a/workflow/applications/gfs_cycled.py +++ b/workflow/applications/gfs_cycled.py @@ -317,7 +317,9 @@ def get_task_names(self): task_names[run].append('echgres') if 'gdas' in run else 0 task_names[run] += ['ediag'] if options['lobsdiag_forenkf'] else ['eomg'] task_names[run].append('esnowanl') if options['do_jedisnowda'] and 'gdas' in run else 0 + task_names[run].append('efcs') if 'gdas' in run else 0 + task_names[run].append('epos') if 'gdas' in run else 0 - task_names[run] += ['stage_ic', 'ecen', 'esfc', 'efcs', 'epos', 'earc', 'cleanup'] + task_names[run] += ['stage_ic', 'ecen', 'esfc', 'earc', 'cleanup'] return task_names diff --git a/workflow/rocoto/gfs_tasks.py b/workflow/rocoto/gfs_tasks.py index 54870b79cc..9b6f712380 100644 --- a/workflow/rocoto/gfs_tasks.py +++ b/workflow/rocoto/gfs_tasks.py @@ -2896,7 +2896,10 @@ def _get_eposgroups(epos): def earc(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'{self.run}_epmn'} + if 'enkfgdas' in self.run: + dep_dict = {'type': 'metatask', 'name': f'{self.run}_epmn'} + else: + dep_dict = {'type': 'task', 'name': f'{self.run}_esfc'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) From 21f804885bdf875760e78f527882fd1df55b95a5 Mon Sep 17 00:00:00 2001 From: David Huber <69919478+DavidHuber-NOAA@users.noreply.github.com> Date: Thu, 2 Jan 2025 21:41:59 +0000 Subject: [PATCH 08/33] Fix GEFS and SFS compile flags in build_all.sh (#3197) # Description The build_all.sh script was fixed to send the correct flags to build_ufs.sh for GEFS (added a missing `-w`) and SFS (added a missing `-y`) executables. Issue reported by @AntonMFernando-NOAA. 
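For reference, a minimal sketch of how the corrected flag strings resolve (values are illustrative; the real script also folds in the UFS app, verbose, and debug options shown in the diff below):

```
#!/usr/bin/env bash
# Sketch only: the distinguishing build_ufs.sh flags per UFS variant after the fix.
declare -A variant_opts=(
  ["ufs_gfs"]="-e gfs_model.x"       # default build, unstructured wave grid
  ["ufs_gefs"]="-w -e gefs_model.x"  # -w: structured wave grid
  ["ufs_sfs"]="-y -e sfs_model.x"    # -y: hydrostatic FV3
)
for variant in ufs_gfs ufs_gefs ufs_sfs; do
  echo "./build_ufs.sh ${variant_opts[${variant}]} -j 8"
done
```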
--- sorc/build_all.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sorc/build_all.sh b/sorc/build_all.sh index f4618b948c..13d7c9c78b 100755 --- a/sorc/build_all.sh +++ b/sorc/build_all.sh @@ -104,8 +104,8 @@ _gefs_exec="gefs_model.x" _sfs_exec="sfs_model.x" build_opts=( ["ufs_gfs"]="${wave_opt} ${_build_ufs_opt} ${_verbose_opt} ${_build_debug} -e ${_gfs_exec}" - ["ufs_gefs"]="${wave_opt} ${_build_ufs_opt} ${_verbose_opt} ${_build_debug} -e ${_gefs_exec}" - ["ufs_sfs"]="${wave_opt} ${_build_ufs_opt} ${_verbose_opt} ${_build_debug} -e ${_sfs_exec}" + ["ufs_gefs"]="${wave_opt} ${_build_ufs_opt} ${_verbose_opt} ${_build_debug} -w -e ${_gefs_exec}" + ["ufs_sfs"]="${wave_opt} ${_build_ufs_opt} ${_verbose_opt} ${_build_debug} -y -e ${_sfs_exec}" ["upp"]="${_build_debug}" ["ww3_gfs"]="${_verbose_opt} ${_build_debug}" ["ww3_gefs"]="-w ${_verbose_opt} ${_build_debug}" From da9d717a3b9bcf0869477753552ddccea0c6d806 Mon Sep 17 00:00:00 2001 From: RussTreadon-NOAA <26926959+RussTreadon-NOAA@users.noreply.github.com> Date: Thu, 2 Jan 2025 20:25:12 -0500 Subject: [PATCH 09/33] Add efcs and epos to ufs_hybatm xml (#3192) (#3193) # Description g-w PR #3185 inadvertently removed metatasks enkfgdas_fcst and enkfgdas_epmn from the ufs_hybatmDA xml. This PR modifies `workflow/applications/gfs_cycled.py` so that these metatasks are once again in the ufs_hybatmDA xml. Resolves: #3192 --- parm/archive/enkf_restartb_grp.yaml.j2 | 2 +- workflow/applications/gfs_cycled.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/parm/archive/enkf_restartb_grp.yaml.j2 b/parm/archive/enkf_restartb_grp.yaml.j2 index 50595a6bbf..bf5656bf8c 100644 --- a/parm/archive/enkf_restartb_grp.yaml.j2 +++ b/parm/archive/enkf_restartb_grp.yaml.j2 @@ -38,5 +38,5 @@ enkf_restartb_grp: - "{{ COMIN_ATMOS_RESTART_MEM | relpath(ROTDIR) }}/{{ r_prefix }}.coupler.res" - "{{ COMIN_ATMOS_RESTART_MEM | relpath(ROTDIR) }}/{{ r_prefix }}.fv_core.res.nc" {% endfor %} - {% endfor %} {% endif %} + {% endfor %} diff --git a/workflow/applications/gfs_cycled.py b/workflow/applications/gfs_cycled.py index 5ecfddf276..4df03b9444 100644 --- a/workflow/applications/gfs_cycled.py +++ b/workflow/applications/gfs_cycled.py @@ -317,9 +317,9 @@ def get_task_names(self): task_names[run].append('echgres') if 'gdas' in run else 0 task_names[run] += ['ediag'] if options['lobsdiag_forenkf'] else ['eomg'] task_names[run].append('esnowanl') if options['do_jedisnowda'] and 'gdas' in run else 0 - task_names[run].append('efcs') if 'gdas' in run else 0 - task_names[run].append('epos') if 'gdas' in run else 0 + task_names[run].append('efcs') if 'gdas' in run else 0 + task_names[run].append('epos') if 'gdas' in run else 0 task_names[run] += ['stage_ic', 'ecen', 'esfc', 'earc', 'cleanup'] return task_names From e5d857b060361cea5e5692ae6bb33e4299f0a08e Mon Sep 17 00:00:00 2001 From: TerrenceMcGuinness-NOAA Date: Fri, 3 Jan 2025 18:44:43 -0500 Subject: [PATCH 10/33] Set runtime shell requirements within Jenkins Pipeline (#3171) This PR updates the Jenkins Pipeline script to source `gw_setup.sh` from within the PR itself whenever a shell command that needs it is issued. Currently the runtime bash shell environment requirements for running the **global-workflow** (set by sourcing `$HOMEgfs/workflow/gw_setup.sh`) are established by the user's shell that makes the connection from the Java remote agent to the Jenkins Controller. This update simply sources `gw_setup.sh` directly at run time whenever needed instead.
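The pattern applied to each `sh()` step is simply to source the setup script before the command; schematically (a sketch, not the literal Jenkinsfile, with one of the pipeline's utility calls as the example command):

```
# Inside each Jenkins sh() step; HOMEgfs and machine come from the pipeline env.
source "${HOMEgfs}/workflow/gw_setup.sh"
"${HOMEgfs}/ci/scripts/utils/get_host_case_list.py" "${machine}"
```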
**NOTE:** This does not apply to **GitHub CLI** and **pyGitHub** since they are not currently supported by **Spack Stack** and therefore not incorporated into `gw_setup.sh`. These remaining dependencies are the responsibility of the _role account_ holders who manage the Jenkins Controller to Node Agent connections. --------- Co-authored-by: Terry McGuinness Co-authored-by: Rahul Mahajan --- ci/Jenkinsfile | 45 +++++++++++++++++++++------ ci/scripts/utils/launch_java_agent.sh | 15 ++++----- 2 files changed, 44 insertions(+), 16 deletions(-) diff --git a/ci/Jenkinsfile b/ci/Jenkinsfile index b7a29e15b0..67ac0fdb17 100644 --- a/ci/Jenkinsfile +++ b/ci/Jenkinsfile @@ -138,8 +138,14 @@ pipeline { } } try { - sh(script: "${HOMEgfs}/ci/scripts/utils/publish_logs.py --file ${error_logs} --repo PR_BUILD_${env.CHANGE_ID}") - gist_url=sh(script: "${HOMEgfs}/ci/scripts/utils/publish_logs.py --file ${error_logs} --gist PR_BUILD_${env.CHANGE_ID}", returnStdout: true).trim() + sh(script: """ + source ${HOMEgfs}/workflow/gw_setup.sh + ${HOMEgfs}/ci/scripts/utils/publish_logs.py --file ${error_logs} --repo PR_BUILD_${env.CHANGE_ID} + """) + gist_url=sh(script: """ + source ${HOMEgfs}/workflow/gw_setup.sh + ${HOMEgfs}/ci/scripts/utils/publish_logs.py --file ${error_logs} --gist PR_BUILD_${env.CHANGE_ID} + """, returnStdout: true).trim() sh(script: """${GH} pr comment ${env.CHANGE_ID} --repo ${repo_url} --body "Build **FAILED** on **${Machine}** in Build# ${env.BUILD_NUMBER} with error logs:\n\\`\\`\\`\n${error_logs_message}\\`\\`\\`\n\nFollow link here to view the contents of the above file(s): [(link)](${gist_url})" """) } catch (Exception error_comment) { echo "Failed to comment on PR: ${error_comment.getMessage()}" @@ -158,7 +164,10 @@ pipeline { } } // Get a list of CI cases to run - CI_CASES = sh(script: "${HOMEgfs}/ci/scripts/utils/get_host_case_list.py ${machine}", returnStdout: true).trim().split() + CI_CASES = sh(script: """ + source ${HOMEgfs}/workflow/gw_setup.sh + ${HOMEgfs}/ci/scripts/utils/get_host_case_list.py ${machine} + """, returnStdout: true).trim().split() echo "Cases to run: ${CI_CASES}" } } @@ -179,7 +188,10 @@ pipeline { script { env.RUNTESTS = "${CUSTOM_WORKSPACE}/RUNTESTS" try { - error_output = sh(script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh create_experiment ${HOMEgfs}/ci/cases/pr/${caseName}.yaml", returnStdout: true).trim() + error_output = sh(script: """ + source ${HOMEgfs}/workflow/gw_setup.sh + ${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh create_experiment ${HOMEgfs}/ci/cases/pr/${caseName}.yaml + """, returnStdout: true).trim() } catch (Exception error_create) { sh(script: """${GH} pr comment ${env.CHANGE_ID} --repo ${repo_url} --body "${Case} **FAILED** to create experiment on ${Machine} in BUILD# ${env.BUILD_NUMBER}\n with the error:\n\\`\\`\\`\n${error_output}\\`\\`\\`" """) error("Case ${caseName} failed to create experiment directory") @@ -194,10 +206,19 @@ pipeline { def error_file = "${CUSTOM_WORKSPACE}/RUNTESTS/${pslot}_error.logs" sh(script: " rm -f ${error_file}") try { - sh(script: "${HOMEgfs}/ci/scripts/run-check_ci.sh ${CUSTOM_WORKSPACE} ${pslot} 'global-workflow'") - sh(script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh cleanup_experiment ${CUSTOM_WORKSPACE}/RUNTESTS/EXPDIR/${pslot}") + sh(script: """ + source ${HOMEgfs}/workflow/gw_setup.sh + ${HOMEgfs}/ci/scripts/run-check_ci.sh ${CUSTOM_WORKSPACE} ${pslot} 'global-workflow' + """) + sh(script: """ + source ${HOMEgfs}/workflow/gw_setup.sh + 
${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh cleanup_experiment ${CUSTOM_WORKSPACE}/RUNTESTS/EXPDIR/${pslot} + """) } catch (Exception error_experment) { - sh(script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh cancel_batch_jobs ${pslot}") + sh(script: """ + source ${HOMEgfs}/workflow/gw_setup.sh + ${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh cancel_batch_jobs ${pslot} + """) ws(CUSTOM_WORKSPACE) { def error_logs = "" def error_logs_message = "" @@ -217,9 +238,15 @@ pipeline { } } try { - gist_url = sh(script: "${HOMEgfs}/ci/scripts/utils/publish_logs.py --file ${error_logs} --gist PR_${env.CHANGE_ID}", returnStdout: true).trim() + gist_url = sh(script: """ + source ${HOMEgfs}/workflow/gw_setup.sh + ${HOMEgfs}/ci/scripts/utils/publish_logs.py --file ${error_logs} --gist PR_${env.CHANGE_ID} + """, returnStdout: true).trim() sh(script: """${GH} pr comment ${env.CHANGE_ID} --repo ${repo_url} --body "Experiment ${caseName} **FAILED** on ${Machine} in Build# ${env.BUILD_NUMBER} with error logs:\n\\`\\`\\`\n${error_logs_message}\\`\\`\\`\n\nFollow link here to view the contents of the above file(s): [(link)](${gist_url})" """) - sh(script: "${HOMEgfs}/ci/scripts/utils/publish_logs.py --file ${error_logs} --repo PR_${env.CHANGE_ID}") + sh(script: """ + source ${HOMEgfs}/workflow/gw_setup.sh + ${HOMEgfs}/ci/scripts/utils/publish_logs.py --file ${error_logs} --repo PR_${env.CHANGE_ID} + """) } catch (Exception error_comment) { echo "Failed to comment on PR: ${error_comment.getMessage()}" } diff --git a/ci/scripts/utils/launch_java_agent.sh b/ci/scripts/utils/launch_java_agent.sh index eb78d3b1ef..ad79a75cbd 100755 --- a/ci/scripts/utils/launch_java_agent.sh +++ b/ci/scripts/utils/launch_java_agent.sh @@ -65,14 +65,14 @@ controller_url="https://jenkins.epic.oarcloud.noaa.gov" controller_user=${controller_user:-"terry.mcguinness"} controller_user_auth_token="jenkins_token" -HOMEgfs="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." >/dev/null 2>&1 && pwd )" +HOMEGFS_="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." >/dev/null 2>&1 && pwd )" host=$(hostname) ######################################################################### # Set up runtime environment varibles for accounts on supproted machines ######################################################################### -source "${HOMEgfs}/ush/detect_machine.sh" +source "${HOMEGFS_}/ush/detect_machine.sh" case ${MACHINE_ID} in hera | orion | hercules | wcoss2 | gaea) echo "Launch Jenkins Java Controler on ${MACHINE_ID}";; @@ -84,10 +84,10 @@ esac LOG=lanuched_agent-$(date +%Y%m%d%M).log rm -f "${LOG}" -source "${HOMEgfs}/ush/module-setup.sh" -module use "${HOMEgfs}/modulefiles" +source "${HOMEGFS_}/ush/module-setup.sh" +module use "${HOMEGFS_}/modulefiles" module load "module_gwsetup.${MACHINE_ID}" -source "${HOMEgfs}/ci/platforms/config.${MACHINE_ID}" +source "${HOMEGFS_}/ci/platforms/config.${MACHINE_ID}" JAVA_HOME="${JENKINS_AGENT_LANUCH_DIR}/JAVA/jdk-17.0.10" if [[ ! -d "${JAVA_HOME}" ]]; then @@ -102,9 +102,10 @@ JAVA="${JAVA_HOME}/bin/java" echo "JAVA VERSION: " ${JAVA} -version -export GH="${HOME}/bin/gh" -[[ -f "${GH}" ]] || echo "gh is not installed in ${HOME}/bin" +GH=$(command -v gh || echo "${HOME}/bin/gh") +[[ -f "${GH}" ]] || ( echo "ERROR: GitHub CLI (gh) not found. 
(exiting with error)"; exit 1 ) ${GH} --version +export GH check_mark=$("${GH}" auth status -t 2>&1 | grep "Token:" | awk '{print $1}') || true if [[ "${check_mark}" != "✓" ]]; then From 29089be113dc2f6da0dbade2c328cfadec54a04d Mon Sep 17 00:00:00 2001 From: Rahul Mahajan Date: Fri, 3 Jan 2025 20:28:33 -0500 Subject: [PATCH 11/33] Ensure OCNRES and ICERES have 3 digits in the archive script (#3199) This PR: - ensures the `OCNRES` and `ICERES` variables in `task_config` are 3 digits Resolves #3198 --- scripts/exglobal_archive.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/scripts/exglobal_archive.py b/scripts/exglobal_archive.py index 5ae57ca7e3..ae613fb39c 100755 --- a/scripts/exglobal_archive.py +++ b/scripts/exglobal_archive.py @@ -17,6 +17,13 @@ def main(): # Instantiate the Archive object archive = Archive(config) + # update these keys to be 3 digits if they are part of archive.task_config.keys + for key in ['OCNRES', 'ICERES']: + try: + archive.task_config[key] = f"{archive.task_config[key]:03d}" + except KeyError as ee: + logger.info(f"key ({key}) not found in archive.task_config!") + # Pull out all the configuration keys needed to run the rest of archive steps keys = ['ATARDIR', 'current_cycle', 'FHMIN', 'FHMAX', 'FHOUT', 'RUN', 'PDY', 'DO_VERFRAD', 'DO_VMINMON', 'DO_VERFOZN', 'DO_ICE', 'DO_PREP_OBS_AERO', @@ -37,16 +44,15 @@ def main(): archive_dict = AttrDict() for key in keys: - archive_dict[key] = archive.task_config.get(key) - if archive_dict[key] is None: - print(f"Warning: key ({key}) not found in task_config!") + try: + archive_dict[key] = archive.task_config[key] + except KeyError as ee: + logger.warning(f"WARNING: key ({key}) not found in archive.task_config!") # Also import all COMIN* and COMOUT* directory and template variables for key in archive.task_config.keys(): - if key.startswith("COM_") or key.startswith("COMIN_") or key.startswith("COMOUT_"): + if key.startswith(("COM_", "COMIN_", "COMOUT_")): archive_dict[key] = archive.task_config.get(key) - if archive_dict[key] is None: - print(f"Warning: key ({key}) not found in task_config!") with chdir(config.ROTDIR): From 060aec353f6999c3bfd5f82fd9f11d301a66bf60 Mon Sep 17 00:00:00 2001 From: Rahul Mahajan Date: Mon, 6 Jan 2025 09:29:35 -0500 Subject: [PATCH 12/33] Add echgres as a dependency for earc (#3202) This PR: - adds `echgres` as a dependency to the `earc` job. Resolves #3201 Resolves #3165 --- workflow/rocoto/gfs_tasks.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/workflow/rocoto/gfs_tasks.py b/workflow/rocoto/gfs_tasks.py index 9b6f712380..d2a3e43719 100644 --- a/workflow/rocoto/gfs_tasks.py +++ b/workflow/rocoto/gfs_tasks.py @@ -2901,7 +2901,9 @@ def earc(self): else: dep_dict = {'type': 'task', 'name': f'{self.run}_esfc'} deps.append(rocoto.add_dependency(dep_dict)) - dependencies = rocoto.create_dependency(dep=deps) + dep_dict = {'type': 'task', 'name': f'{self.run}_echgres'} + deps.append(rocoto.add_dependency(dep_dict)) + dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) earcenvars = self.envars.copy() earcenvars.append(rocoto.create_envar(name='ENSGRP', value='#grp#')) From fececaff97a16bc284b6314cea20113064596193 Mon Sep 17 00:00:00 2001 From: AntonMFernando-NOAA <167725623+AntonMFernando-NOAA@users.noreply.github.com> Date: Tue, 7 Jan 2025 10:56:05 -0500 Subject: [PATCH 13/33] Remove multi-grid wave support (#3188) # Description - Eliminate instances where `waveGRD` is a list containing multiple supported wave grids. 
Also, remove the associated for loops (e.g., `for wavGRD in ${waveGRD}; do`) - Resolves #2637 --- parm/config/gefs/config.wave | 20 +- parm/config/gfs/config.wave | 20 +- scripts/exgfs_wave_init.sh | 2 +- scripts/exgfs_wave_post_gridded_sbs.sh | 16 +- ush/forecast_postdet.sh | 25 +-- ush/forecast_predet.sh | 33 +-- ush/parsing_namelists_WW3.sh | 287 ++++++------------------- ush/parsing_ufs_configure.sh | 1 - ush/wave_grid_interp_sbs.sh | 5 +- 9 files changed, 81 insertions(+), 328 deletions(-) diff --git a/parm/config/gefs/config.wave b/parm/config/gefs/config.wave index 6a1529274a..be37e7edb2 100644 --- a/parm/config/gefs/config.wave +++ b/parm/config/gefs/config.wave @@ -16,28 +16,10 @@ export RUNwave="${RUN}wave" export RUNRSTwave="gdas" #grid dependent variable defaults -export waveGRDN='1' # grid number for ww3_multi -export waveGRDG='10' # grid group for ww3_multi -export USE_WAV_RMP='NO' # YES/NO rmp grid remapping pre-processed coefficients -export waveMULTIGRID='.false.' # .true./.false. for multi or shel export MESH_WAV="mesh.${waveGRD}.nc" # Mesh grid for wave model for CMEPS -export waveesmfGRD=' ' # input grid for multigrid #Grid dependent variables for various grids case "${waveGRD}" in - "gnh_10m;aoc_9km;gsh_15m") - #GFSv16 settings: - export waveGRDN='1 2 3' - export waveGRDG='10 20 30' - export USE_WAV_RMP='YES' - export waveMULTIGRID='.true.' - export IOSRV='3' - export MESH_WAV=' ' - export waveesmfGRD='glox_10m' - export waveuoutpGRD='points' - export waveinterpGRD='glo_15mxt at_10m ep_10m wc_10m glo_30m' - export wavepostGRD='gnh_10m aoc_9km gsh_15m' - ;; "gwes_30m") #Grid used for P8 export waveinterpGRD='' @@ -157,7 +139,7 @@ export WW3CURIENS='F' export GOFILETYPE=1 # GOFILETYPE=1 one gridded file per output step export POFILETYPE=1 # POFILETYPE=1 one point file per output step -# Parameters for ww3_multi/shel.inp +# Parameters for ww3_shel.inp # Unified output T or F export FUNIPNT='T' # Output server type (see ww3_shel/multi.inp in WW3 repo) diff --git a/parm/config/gfs/config.wave b/parm/config/gfs/config.wave index ea68508547..e792f45473 100644 --- a/parm/config/gfs/config.wave +++ b/parm/config/gfs/config.wave @@ -16,28 +16,10 @@ export RUNwave="${RUN}wave" export RUNRSTwave="gdas" #grid dependent variable defaults -export waveGRDN='1' # grid number for ww3_multi -export waveGRDG='10' # grid group for ww3_multi -export USE_WAV_RMP='NO' # YES/NO rmp grid remapping pre-processed coefficients -export waveMULTIGRID='.false.' # .true./.false. for multi or shel export MESH_WAV="mesh.${waveGRD}.nc" # Mesh grid for wave model for CMEPS -export waveesmfGRD=' ' # input grid for multigrid #Grid dependent variables for various grids case "${waveGRD}" in - "gnh_10m;aoc_9km;gsh_15m") - #GFSv16 settings: - export waveGRDN='1 2 3' - export waveGRDG='10 20 30' - export USE_WAV_RMP='YES' - export waveMULTIGRID='.true.' 
- export IOSRV='3' - export MESH_WAV=' ' - export waveesmfGRD='glox_10m' - export waveuoutpGRD='points' - export waveinterpGRD='glo_15mxt at_10m ep_10m wc_10m glo_30m' - export wavepostGRD='gnh_10m aoc_9km gsh_15m' - ;; "gwes_30m") #Grid used for P8 export waveinterpGRD='' @@ -187,7 +169,7 @@ export WW3CURIENS='F' export GOFILETYPE=1 # GOFILETYPE=1 one gridded file per output step export POFILETYPE=1 # POFILETYPE=1 one point file per output step -# Parameters for ww3_multi/shel.inp +# Parameters for ww3_shel.inp # Unified output T or F export FUNIPNT='T' # Output server type (see ww3_shel/multi.inp in WW3 repo) diff --git a/scripts/exgfs_wave_init.sh b/scripts/exgfs_wave_init.sh index 17e6cec042..dc174f258e 100755 --- a/scripts/exgfs_wave_init.sh +++ b/scripts/exgfs_wave_init.sh @@ -79,7 +79,7 @@ source "${USHgfs}/preamble.sh" chmod 744 cmdfile # Eliminate duplicate grids - array=($WAVECUR_FID $WAVEICE_FID $WAVEWND_FID $waveuoutpGRD $waveGRD $waveesmfGRD $wavepostGRD $waveinterpGRD) + array=($WAVECUR_FID $WAVEICE_FID $WAVEWND_FID $waveuoutpGRD $waveGRD $wavepostGRD $waveinterpGRD) grdALL=$(printf "%s\n" "${array[@]}" | sort -u | tr '\n' ' ') for grdID in ${grdALL}; do diff --git a/scripts/exgfs_wave_post_gridded_sbs.sh b/scripts/exgfs_wave_post_gridded_sbs.sh index a241a00d88..423d6af694 100755 --- a/scripts/exgfs_wave_post_gridded_sbs.sh +++ b/scripts/exgfs_wave_post_gridded_sbs.sh @@ -241,15 +241,13 @@ source "${USHgfs}/preamble.sh" if [ $fhr = $fhrg ] then - for wavGRD in ${waveGRD}; do - gfile="${COMIN_WAVE_HISTORY}/${WAV_MOD_TAG}.out_grd.${wavGRD}.${YMD}.${HMS}" - if [[ ! -s "${gfile}" ]]; then - echo " FATAL ERROR : NO RAW FIELD OUTPUT FILE ${gfile}" - err=3; export err; "${errchk}" - exit "${err}" - fi - ${NLN} "${gfile}" "./out_grd.${wavGRD}" - done + gfile="${COMIN_WAVE_HISTORY}/${WAV_MOD_TAG}.out_grd.${waveGRD}.${YMD}.${HMS}" + if [[ ! -s "${gfile}" ]]; then + echo " FATAL ERROR : NO RAW FIELD OUTPUT FILE ${gfile}" + err=3; export err; "${errchk}" + exit "${err}" + fi + ${NLN} "${gfile}" "./out_grd.${waveGRD}" if [ "$DOGRI_WAV" = 'YES' ] then diff --git a/ush/forecast_postdet.sh b/ush/forecast_postdet.sh index 432e6f690d..25cd4d36f0 100755 --- a/ush/forecast_postdet.sh +++ b/ush/forecast_postdet.sh @@ -375,14 +375,7 @@ WW3_postdet() { # Link output files local wavprfx="${RUN}wave${WAV_MEMBER:-}" - if [[ "${waveMULTIGRID}" == ".true." ]]; then - ${NLN} "${COMOUT_WAVE_HISTORY}/${wavprfx}.log.mww3.${PDY}${cyc}" "log.mww3" - for ww3_grid in ${waveGRD}; do - ${NLN} "${COMOUT_WAVE_HISTORY}/${wavprfx}.log.${ww3_grid}.${PDY}${cyc}" "log.${ww3_grid}" - done - else - ${NLN} "${COMOUT_WAVE_HISTORY}/${wavprfx}.log.${waveGRD}.${PDY}${cyc}" "log.ww3" - fi + ${NLN} "${COMOUT_WAVE_HISTORY}/${wavprfx}.log.${waveGRD}.${PDY}${cyc}" "log.ww3" # Loop for gridded output (uses FHINC) local fhr vdate FHINC ww3_grid @@ -390,13 +383,8 @@ WW3_postdet() { fhinc=${FHOUT_WAV} while (( fhr <= FHMAX_WAV )); do vdate=$(date --utc -d "${current_cycle:0:8} ${current_cycle:8:2} + ${fhr} hours" +%Y%m%d.%H0000) - if [[ "${waveMULTIGRID}" == ".true." 
]]; then - for ww3_grid in ${waveGRD} ; do - ${NLN} "${COMOUT_WAVE_HISTORY}/${wavprfx}.out_grd.${ww3_grid}.${vdate}" "${DATA}/${vdate}.out_grd.${ww3_grid}" - done - else - ${NLN} "${COMOUT_WAVE_HISTORY}/${wavprfx}.out_grd.${waveGRD}.${vdate}" "${DATA}/${vdate}.out_grd.ww3" - fi + ${NLN} "${COMOUT_WAVE_HISTORY}/${wavprfx}.out_grd.${waveGRD}.${vdate}" "${DATA}/${vdate}.out_grd.ww3" + if (( FHMAX_HF_WAV > 0 && FHOUT_HF_WAV > 0 && fhr < FHMAX_HF_WAV )); then fhinc=${FHOUT_HF_WAV} fi @@ -408,11 +396,8 @@ WW3_postdet() { fhinc=${FHINCP_WAV} while (( fhr <= FHMAX_WAV )); do vdate=$(date --utc -d "${current_cycle:0:8} ${current_cycle:8:2} + ${fhr} hours" +%Y%m%d.%H0000) - if [[ "${waveMULTIGRID}" == ".true." ]]; then - ${NLN} "${COMOUT_WAVE_HISTORY}/${wavprfx}.out_pnt.${waveuoutpGRD}.${vdate}" "${DATA}/${vdate}.out_pnt.${waveuoutpGRD}" - else - ${NLN} "${COMOUT_WAVE_HISTORY}/${wavprfx}.out_pnt.${waveuoutpGRD}.${vdate}" "${DATA}/${vdate}.out_pnt.ww3" - fi + ${NLN} "${COMOUT_WAVE_HISTORY}/${wavprfx}.out_pnt.${waveuoutpGRD}.${vdate}" "${DATA}/${vdate}.out_pnt.ww3" + fhr=$((fhr + fhinc)) done } diff --git a/ush/forecast_predet.sh b/ush/forecast_predet.sh index 59afe483ea..14f32378c3 100755 --- a/ush/forecast_predet.sh +++ b/ush/forecast_predet.sh @@ -591,22 +591,9 @@ WW3_predet(){ # Files from wave prep and wave init jobs # Copy mod_def files for wave grids local ww3_grid - if [[ "${waveMULTIGRID}" == ".true." ]]; then - local array=("${WAVECUR_FID}" "${WAVEICE_FID}" "${WAVEWND_FID}" "${waveuoutpGRD}" "${waveGRD}" "${waveesmfGRD}") - echo "Wave Grids: ${array[*]}" - local grdALL - # shellcheck disable=SC2312 - grdALL=$(printf "%s\n" "${array[@]}" | sort -u | tr '\n' ' ') - - for ww3_grid in ${grdALL}; do - ${NCP} "${COMIN_WAVE_PREP}/${RUN}wave.mod_def.${ww3_grid}" "${DATA}/mod_def.${ww3_grid}" \ - || ( echo "FATAL ERROR: Failed to copy '${RUN}wave.mod_def.${ww3_grid}' from '${COMIN_WAVE_PREP}'"; exit 1 ) - done - else - #if shel, only 1 waveGRD which is linked to mod_def.ww3 - ${NCP} "${COMIN_WAVE_PREP}/${RUN}wave.mod_def.${waveGRD}" "${DATA}/mod_def.ww3" \ - || ( echo "FATAL ERROR: Failed to copy '${RUN}wave.mod_def.${waveGRD}' from '${COMIN_WAVE_PREP}'"; exit 1 ) - fi + #if shel, only 1 waveGRD which is linked to mod_def.ww3 + ${NCP} "${COMIN_WAVE_PREP}/${RUN}wave.mod_def.${waveGRD}" "${DATA}/mod_def.ww3" \ + || ( echo "FATAL ERROR: Failed to copy '${RUN}wave.mod_def.${waveGRD}' from '${COMIN_WAVE_PREP}'"; exit 1 ) if [[ "${WW3ICEINP}" == "YES" ]]; then local wavicefile="${COMIN_WAVE_PREP}/${RUN}wave.${WAVEICE_FID}.t${current_cycle:8:2}z.ice" @@ -638,20 +625,6 @@ WW3_predet(){ fi WAV_MOD_TAG="${RUN}wave${waveMEMB}" - if [[ "${USE_WAV_RMP:-YES}" == "YES" ]]; then - local file file_array file_count - # shellcheck disable=SC2312 - mapfile -t file_array < <(find "${FIXgfs}/wave" -name "rmp_src_to_dst_conserv_*" | sort) - file_count=${#file_array[@]} - if (( file_count > 0 )); then - for file in "${file_array[@]}" ; do - ${NCP} "${file}" "${DATA}/" - done - else - echo 'FATAL ERROR : No rmp precomputed nc files found for wave model, ABORT!' - exit 4 - fi - fi } # shellcheck disable=SC2034 diff --git a/ush/parsing_namelists_WW3.sh b/ush/parsing_namelists_WW3.sh index c57a90e50a..5ee4944c18 100755 --- a/ush/parsing_namelists_WW3.sh +++ b/ush/parsing_namelists_WW3.sh @@ -75,26 +75,14 @@ WW3_namelists(){ # --------------------------------------------------------------------------- # -# Create ww3_multi/shel.inp +# Create ww3_shel.inp - if [ $waveMULTIGRID = ".true." 
]; then - # ww3_multi template - if [ -f ${PARMgfs}/wave/ww3_multi.inp.tmpl ]; then - cp ${PARMgfs}/wave/ww3_multi.inp.tmpl ww3_multi.inp.tmpl - fi - if [ ! -f ww3_multi.inp.tmpl ]; then - echo "ABNORMAL EXIT: NO TEMPLATE FOR WW3 MULTI INPUT FILE" - exit 11 - fi - else - # ww3_multi template - if [ -f ${PARMgfs}/wave/ww3_shel.inp.tmpl ]; then - cp ${PARMgfs}/wave/ww3_shel.inp.tmpl ww3_shel.inp.tmpl - fi - if [ ! -f ww3_shel.inp.tmpl ]; then - echo "ABNORMAL EXIT: NO TEMPLATE FOR WW3 SHEL INPUT FILE" - exit 12 - fi + if [ -f "${PARMgfs}/wave/ww3_shel.inp.tmpl" ]; then + cp "${PARMgfs}/wave/ww3_shel.inp.tmpl" "ww3_shel.inp.tmpl" + fi + if [ ! -f ww3_shel.inp.tmpl ]; then + echo "ABNORMAL EXIT: NO TEMPLATE FOR WW3 SHEL INPUT FILE" + exit 12 fi # Buoy location file @@ -114,213 +102,60 @@ WW3_namelists(){ exit 12 fi - - -if [ $waveMULTIGRID = ".true." ]; then -#multi - # Initialize inp file parameters - NFGRIDS=0 - NMGRIDS=0 - CPLILINE='$' - ICELINE='$' - ICEFLAG='no' - CURRLINE='$' - CURRFLAG='no' - WINDLINE='$' - WINDFLAG='no' - UNIPOINTS='$' - -# Check for required inputs and coupling options - if [ $waveuoutpGRD ] - then - UNIPOINTS="'$waveuoutpGRD'" - fi - -# Check if waveesmfGRD is set - if [ ${waveesmfGRD} ] - then - NFGRIDS=$(expr $NFGRIDS + 1) - fi - - case ${WW3ATMINP} in - 'YES' ) - NFGRIDS=$(expr $NFGRIDS + 1) - WINDLINE=" '$WAVEWND_FID' F F T F F F F F F" - WINDFLAG="$WAVEWND_FID" - ;; - 'CPL' ) - WNDIFLAG='T' - if [ ${waveesmfGRD} ] - then - WINDFLAG="CPL:${waveesmfGRD}" - CPLILINE=" '${waveesmfGRD}' F F T F F F F F F" - else - WINDFLAG="CPL:native" - fi - ;; - esac - - case ${WW3ICEINP} in - 'YES' ) - NFGRIDS=$(expr $NFGRIDS + 1) - ICEIFLAG='T' - ICELINE=" '$WAVEICE_FID' F F F T F F F F F" - ICEFLAG="$WAVEICE_FID" - ;; - 'CPL' ) - ICEIFLAG='T' - if [ ${waveesmfGRD} ] - then - ICEFLAG="CPL:${waveesmfGRD}" - CPLILINE=" '${waveesmfGRD}' F F ${WNDIFLAG} T F F F F F" - else - ICEFLAG="CPL:native" - fi - ;; - esac - - case ${WW3CURINP} in - 'YES' ) - if [ "$WAVECUR_FID" != "$WAVEICE_FID" ]; then - NFGRIDS=$(expr $NFGRIDS + 1) - CURRLINE=" '$WAVECUR_FID' F T F F F F F F F" - CURRFLAG="$WAVECUR_FID" - else # cur fields share the same grid as ice grid - ICELINE=" '$WAVEICE_FID' F T F ${ICEIFLAG} F F F F F" - CURRFLAG="$WAVEICE_FID" - fi - ;; - 'CPL' ) - CURIFLAG='T' - if [ ${waveesmfGRD} ] - then - CURRFLAG="CPL:${waveesmfGRD}" - CPLILINE=" '${waveesmfGRD}' F T ${WNDIFLAG} ${ICEFLAG} F F F F F" - else - CURRFLAG="CPL:native" - fi - ;; - esac - - unset agrid - agrid= - gline= - GRDN=0 -# grdGRP=1 # Single group for now - for grid in ${waveGRD} - do - GRDN=$(expr ${GRDN} + 1) - agrid=( ${agrid[*]} ${grid} ) - NMGRIDS=$(expr $NMGRIDS + 1) - gridN=$(echo $waveGRDN | awk -v i=$GRDN '{print $i}') - gridG=$(echo $waveGRDG | awk -v i=$GRDN '{print $i}') - gline="${gline}'${grid}' 'no' 'CURRFLAG' 'WINDFLAG' 'ICEFLAG' 'no' 'no' 'no' 'no' 'no' ${gridN} ${gridG} 0.00 1.00 F\n" - done - gline="${gline}\$" - echo $gline - - sed -e "s/NFGRIDS/$NFGRIDS/g" \ - -e "s/NMGRIDS/${NMGRIDS}/g" \ - -e "s/FUNIPNT/${FUNIPNT}/g" \ - -e "s/IOSRV/${IOSRV}/g" \ - -e "s/FPNTPROC/${FPNTPROC}/g" \ - -e "s/FGRDPROC/${FGRDPROC}/g" \ - -e "s/OUTPARS/${OUTPARS_WAV}/g" \ - -e "s/CPLILINE/${CPLILINE}/g" \ - -e "s/UNIPOINTS/${UNIPOINTS}/g" \ - -e "s/GRIDLINE/${gline}/g" \ - -e "s/ICELINE/$ICELINE/g" \ - -e "s/CURRLINE/$CURRLINE/g" \ - -e "s/WINDLINE/$WINDLINE/g" \ - -e "s/ICEFLAG/$ICEFLAG/g" \ - -e "s/CURRFLAG/$CURRFLAG/g" \ - -e "s/WINDFLAG/$WINDFLAG/g" \ - -e "s/RUN_BEG/$time_beg/g" \ - -e "s/RUN_END/$time_end/g" \ - -e 
"s/OUT_BEG/$time_beg_out/g" \ - -e "s/OUT_END/$time_end/g" \ - -e "s/DTFLD/ $DTFLD_WAV/g" \ - -e "s/FLAGMASKCOMP/ $FLAGMASKCOMP/g" \ - -e "s/FLAGMASKOUT/ $FLAGMASKOUT/g" \ - -e "s/GOFILETYPE/ $GOFILETYPE/g" \ - -e "s/POFILETYPE/ $POFILETYPE/g" \ - -e "s/DTPNT/ $DTPNT_WAV/g" \ - -e "/BUOY_FILE/r buoy.loc" \ - -e "s/BUOY_FILE/DUMMY/g" \ - -e "s/RST_BEG/$time_rst_ini/g" \ - -e "s/RSTTYPE/$RSTTYPE_WAV/g" \ - -e "s/RST_2_BEG/$time_rst2_ini/g" \ - -e "s/DTRST/$DT_1_RST_WAV/g" \ - -e "s/DT_2_RST/$DT_2_RST_WAV/g" \ - -e "s/RST_END/$time_rst1_end/g" \ - -e "s/RST_2_END/$time_rst2_end/g" \ - ww3_multi.inp.tmpl | \ - sed -n "/DUMMY/!p" > ww3_multi.inp - - rm -f ww3_multi.inp.tmpl buoy.loc - - cat ww3_multi.inp - -else - #ww3_shel - -# Initialize inp file parameters - ICELINE='F F' - CURRLINE='F F' - WINDLINE='F F' - - case ${WW3ATMINP} in - 'YES' ) - WINDLINE="T F";; - 'CPL' ) - WINDLINE="C F";; - esac - - case ${WW3ICEINP} in - 'YES' ) - ICELINE="T F";; - 'CPL' ) - ICELINE="C F";; - esac - - case ${WW3CURINP} in - 'YES' ) - CURRLINE="T F";; - 'CPL' ) - CURRLINE="C F";; - esac - - sed -e "s/IOSRV/${IOSRV}/g" \ - -e "s/OUTPARS/${OUTPARS_WAV}/g" \ - -e "s/ICELINE/$ICELINE/g" \ - -e "s/CURRLINE/$CURRLINE/g" \ - -e "s/WINDLINE/$WINDLINE/g" \ - -e "s/RUN_BEG/$time_beg/g" \ - -e "s/RUN_END/$time_end/g" \ - -e "s/OUT_BEG/$time_beg_out/g" \ - -e "s/OUT_END/$time_end/g" \ - -e "s/DTFLD/ $DTFLD_WAV/g" \ - -e "s/GOFILETYPE/ $GOFILETYPE/g" \ - -e "s/POFILETYPE/ $POFILETYPE/g" \ - -e "s/DTPNT/ $DTPNT_WAV/g" \ - -e "s/DTPNT/ $DTPNT_WAV/g" \ - -e "/BUOY_FILE/r buoy.loc" \ - -e "s/BUOY_FILE/DUMMY/g" \ - -e "s/RST_BEG/$time_rst_ini/g" \ - -e "s/RSTTYPE/$RSTTYPE_WAV/g" \ - -e "s/RST_2_BEG/$time_rst2_ini/g" \ - -e "s/DTRST/$DT_1_RST_WAV/g" \ - -e "s/DT_2_RST/$DT_2_RST_WAV/g" \ - -e "s/RST_END/$time_rst1_end/g" \ - -e "s/RST_2_END/$time_rst2_end/g" \ - ww3_shel.inp.tmpl | \ - sed -n "/DUMMY/!p" > ww3_shel.inp - - rm -f ww3_shel.inp.tmpl buoy.loc - - cat ww3_shel.inp - -fi +ICELINE='F F' +CURRLINE='F F' +WINDLINE='F F' + +case ${WW3ATMINP} in + 'YES' ) + WINDLINE="T F";; + 'CPL' ) + WINDLINE="C F";; +esac + +case ${WW3ICEINP} in + 'YES' ) + ICELINE="T F";; + 'CPL' ) + ICELINE="C F";; +esac + +case ${WW3CURINP} in + 'YES' ) + CURRLINE="T F";; + 'CPL' ) + CURRLINE="C F";; +esac + +sed -e "s/IOSRV/${IOSRV}/g" \ + -e "s/OUTPARS/${OUTPARS_WAV}/g" \ + -e "s/ICELINE/$ICELINE/g" \ + -e "s/CURRLINE/$CURRLINE/g" \ + -e "s/WINDLINE/$WINDLINE/g" \ + -e "s/RUN_BEG/$time_beg/g" \ + -e "s/RUN_END/$time_end/g" \ + -e "s/OUT_BEG/$time_beg_out/g" \ + -e "s/OUT_END/$time_end/g" \ + -e "s/DTFLD/ $DTFLD_WAV/g" \ + -e "s/GOFILETYPE/ $GOFILETYPE/g" \ + -e "s/POFILETYPE/ $POFILETYPE/g" \ + -e "s/DTPNT/ $DTPNT_WAV/g" \ + -e "s/DTPNT/ $DTPNT_WAV/g" \ + -e "/BUOY_FILE/r buoy.loc" \ + -e "s/BUOY_FILE/DUMMY/g" \ + -e "s/RST_BEG/$time_rst_ini/g" \ + -e "s/RSTTYPE/$RSTTYPE_WAV/g" \ + -e "s/RST_2_BEG/$time_rst2_ini/g" \ + -e "s/DTRST/$DT_1_RST_WAV/g" \ + -e "s/DT_2_RST/$DT_2_RST_WAV/g" \ + -e "s/RST_END/$time_rst1_end/g" \ + -e "s/RST_2_END/$time_rst2_end/g" \ + ww3_shel.inp.tmpl | \ +sed -n "/DUMMY/!p" > ww3_shel.inp + +rm -f ww3_shel.inp.tmpl buoy.loc + +cat ww3_shel.inp } diff --git a/ush/parsing_ufs_configure.sh b/ush/parsing_ufs_configure.sh index d8276476c4..7ee699ef0a 100755 --- a/ush/parsing_ufs_configure.sh +++ b/ush/parsing_ufs_configure.sh @@ -74,7 +74,6 @@ if [[ "${cplwav}" = ".true." 
]]; then local wav_model="ww3" local wav_petlist_bounds="$(( ATMPETS+OCNPETS+ICEPETS )) $(( ATMPETS+OCNPETS+ICEPETS+WAVPETS-1 ))" local wav_omp_num_threads="${WAVTHREADS}" - local MULTIGRID="${waveMULTIGRID}" local WW3_user_sets_restname="false" local WW3_user_histname="false" diff --git a/ush/wave_grid_interp_sbs.sh b/ush/wave_grid_interp_sbs.sh index d56c5d0763..c046afb2e9 100755 --- a/ush/wave_grid_interp_sbs.sh +++ b/ush/wave_grid_interp_sbs.sh @@ -89,13 +89,12 @@ source "${USHgfs}/preamble.sh" fi ${NLN} "${DATA}/${grdID}_interp.inp.tmpl" "${grdID}_interp.inp.tmpl" - for ID in ${waveGRD}; do - ${NLN} "${DATA}/output_${ymdh}0000/out_grd.${ID}" "out_grd.${ID}" - done + ${NLN} "${DATA}/output_${ymdh}0000/out_grd.${waveGRD}" "out_grd.${waveGRD}" for ID in ${waveGRD} ${grdID}; do ${NLN} "${DATA}/mod_def.${ID}" "mod_def.${ID}" done + # --------------------------------------------------------------------------- # # 1. Generate GRID file with all data From f1afac99e738e7df688f3878902af7e0b1d09ec8 Mon Sep 17 00:00:00 2001 From: Christopher Hill <102273578+ChristopherHill-NOAA@users.noreply.github.com> Date: Tue, 7 Jan 2025 14:49:32 -0500 Subject: [PATCH 14/33] Remove 5WAVH from AWIPS GRIB2 parm files (#3146) # Description As referred within #3019, the variable 5WAVH is being removed from each of the files `parm/wmo/grib2_awpgfs[000-240].003` and `parm/wmo/grib2_awpgfs_20km_[ak,conus,pac,prico]f000` for the purpose of remedying "error code 30" that was generated through the execution of `exgfs_atmos_awips_20km_1p0deg.sh` during the GFSv17 HR4 test run. Obsolete code is also being removed from the script `exgfs_atmos_awips_20km_1p0deg.sh`. No other errors mentioned in #3019 are addressed in this PR. # Type of change - [x] Bug fix (fixes something broken) - [ ] New feature (adds functionality) - [ ] Maintenance (code refactor, clean-up, new CI test, etc.) # Change characteristics - Is this a breaking change (a change in existing functionality)? NO - Does this change require a documentation update? NO - Does this change require an update to any of the following submodules? NO (If YES, please add a link to any PRs that are pending.) - [ ] EMC verif-global - [ ] GDAS - [ ] GFS-utils - [ ] GSI - [ ] GSI-monitor - [ ] GSI-utils - [ ] UFS-utils - [ ] UFS-weather-model - [ ] wxflow # How has this been tested? Removal of variable 5WAVH from the GRIB2 files should allow completion of TOCGRIB2 processing (within `exgfs_atmos_awips_20km_1p0deg.sh`) of the GRIB2 files. @RuiyuSun, or the GW team, may wish to include the requested modifications for future GFSv17 tests that include post-processing jobs. 
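For illustration only (not part of this change set): once the 5WAVH entries are gone, a quick post-run check along the lines of the sketch below could confirm that no stray 5WAVH records remain in the generated AWIPS products. It assumes the `wgrib2` utility is available on PATH and that the outputs follow the `grib2.awpgfs*` naming used in the script; treat the glob and messages as placeholders.

```bash
#!/usr/bin/env bash
# Sketch: fail if any leftover 5WAVH record is found in the AWIPS GRIB2 outputs.
# Assumes wgrib2 is installed; the file glob is illustrative, not from this PR.
set -u
status=0
for f in grib2.awpgfs*; do
  [[ -e "${f}" ]] || continue
  # wgrib2 -match prints the inventory lines whose description matches the regex
  if wgrib2 "${f}" -match "5WAVH" | grep -q .; then
    echo "FATAL ERROR: unexpected 5WAVH record in ${f}" >&2
    status=1
  fi
done
exit "${status}"
```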
# Checklist - [ ] Any dependent changes have been merged and published - [ ] My code follows the style guidelines of this project - [ ] I have performed a self-review of my own code - [ ] I have commented my code, particularly in hard-to-understand areas - [ ] I have documented my code, including function, input, and output descriptions - [ ] My changes generate no new warnings - [ ] New and existing tests pass with my changes - [ ] This change is covered by an existing CI test or a new one has been added - [ ] Any new scripts have been added to the .github/CODEOWNERS file with owners - [ ] I have made corresponding changes to the system documentation if necessary Co-authored-by: christopher hill Co-authored-by: Rahul Mahajan Co-authored-by: David Huber <69919478+DavidHuber-NOAA@users.noreply.github.com> --- parm/wmo/grib2_awpgfs000.003 | 1 - parm/wmo/grib2_awpgfs006.003 | 1 - parm/wmo/grib2_awpgfs012.003 | 1 - parm/wmo/grib2_awpgfs018.003 | 1 - parm/wmo/grib2_awpgfs024.003 | 1 - parm/wmo/grib2_awpgfs030.003 | 1 - parm/wmo/grib2_awpgfs036.003 | 1 - parm/wmo/grib2_awpgfs042.003 | 1 - parm/wmo/grib2_awpgfs048.003 | 1 - parm/wmo/grib2_awpgfs054.003 | 1 - parm/wmo/grib2_awpgfs060.003 | 1 - parm/wmo/grib2_awpgfs066.003 | 1 - parm/wmo/grib2_awpgfs072.003 | 1 - parm/wmo/grib2_awpgfs078.003 | 1 - parm/wmo/grib2_awpgfs084.003 | 1 - parm/wmo/grib2_awpgfs090.003 | 1 - parm/wmo/grib2_awpgfs096.003 | 1 - parm/wmo/grib2_awpgfs102.003 | 1 - parm/wmo/grib2_awpgfs108.003 | 1 - parm/wmo/grib2_awpgfs114.003 | 1 - parm/wmo/grib2_awpgfs120.003 | 1 - parm/wmo/grib2_awpgfs126.003 | 1 - parm/wmo/grib2_awpgfs132.003 | 1 - parm/wmo/grib2_awpgfs138.003 | 1 - parm/wmo/grib2_awpgfs144.003 | 1 - parm/wmo/grib2_awpgfs150.003 | 1 - parm/wmo/grib2_awpgfs156.003 | 1 - parm/wmo/grib2_awpgfs162.003 | 1 - parm/wmo/grib2_awpgfs168.003 | 1 - parm/wmo/grib2_awpgfs174.003 | 1 - parm/wmo/grib2_awpgfs180.003 | 1 - parm/wmo/grib2_awpgfs186.003 | 1 - parm/wmo/grib2_awpgfs192.003 | 1 - parm/wmo/grib2_awpgfs198.003 | 1 - parm/wmo/grib2_awpgfs204.003 | 1 - parm/wmo/grib2_awpgfs210.003 | 1 - parm/wmo/grib2_awpgfs216.003 | 1 - parm/wmo/grib2_awpgfs222.003 | 1 - parm/wmo/grib2_awpgfs228.003 | 1 - parm/wmo/grib2_awpgfs234.003 | 1 - parm/wmo/grib2_awpgfs240.003 | 1 - parm/wmo/grib2_awpgfs_20km_akf000 | 1 - parm/wmo/grib2_awpgfs_20km_conusf000 | 1 - parm/wmo/grib2_awpgfs_20km_pacf000 | 1 - parm/wmo/grib2_awpgfs_20km_pricof000 | 1 - scripts/exgfs_atmos_awips_20km_1p0deg.sh | 10 ---------- 46 files changed, 55 deletions(-) diff --git a/parm/wmo/grib2_awpgfs000.003 b/parm/wmo/grib2_awpgfs000.003 index 941ecf6e70..c3a317d241 100644 --- a/parm/wmo/grib2_awpgfs000.003 +++ b/parm/wmo/grib2_awpgfs000.003 @@ -104,4 +104,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPA92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 81 0 0 1 0 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPA92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 81 0 0 1 0 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPA92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 81 0 0 1 0 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPA50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 81 0 0 1 0 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs006.003 b/parm/wmo/grib2_awpgfs006.003 index 58fe211e0d..bdfeaab370 100644 --- a/parm/wmo/grib2_awpgfs006.003 +++ b/parm/wmo/grib2_awpgfs006.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPB92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 6 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPB92 KWBC',EXTRACT=.true.,PDTN= 
0 ,PDT= 2 2 2 0 96 0 0 1 6 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPB92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 6 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPB50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 6 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs012.003 b/parm/wmo/grib2_awpgfs012.003 index 8c94bb7044..b367878e63 100644 --- a/parm/wmo/grib2_awpgfs012.003 +++ b/parm/wmo/grib2_awpgfs012.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPC92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 12 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPC92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 12 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPC92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 12 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPC50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 12 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs018.003 b/parm/wmo/grib2_awpgfs018.003 index 845f26a747..d89f2d2c73 100644 --- a/parm/wmo/grib2_awpgfs018.003 +++ b/parm/wmo/grib2_awpgfs018.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPD92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 18 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPD92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 18 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPD92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 18 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPD50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 18 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs024.003 b/parm/wmo/grib2_awpgfs024.003 index 56eadc6903..e446f48766 100644 --- a/parm/wmo/grib2_awpgfs024.003 +++ b/parm/wmo/grib2_awpgfs024.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPE92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 24 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPE92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 24 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPE92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 24 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPE50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 24 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs030.003 b/parm/wmo/grib2_awpgfs030.003 index d2d37c03b0..59081135d7 100644 --- a/parm/wmo/grib2_awpgfs030.003 +++ b/parm/wmo/grib2_awpgfs030.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPF92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 30 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPF92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 30 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPF92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 30 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPF50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 30 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs036.003 b/parm/wmo/grib2_awpgfs036.003 index 7cbe4d0aaf..4c18e96a12 100644 --- a/parm/wmo/grib2_awpgfs036.003 +++ b/parm/wmo/grib2_awpgfs036.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPG92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 36 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPG92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 36 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPG92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 36 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPG50 
KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 36 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs042.003 b/parm/wmo/grib2_awpgfs042.003 index 1be18748b3..ad2a74b711 100644 --- a/parm/wmo/grib2_awpgfs042.003 +++ b/parm/wmo/grib2_awpgfs042.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPH92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 42 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPH92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 42 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPH92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 42 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPH50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 42 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs048.003 b/parm/wmo/grib2_awpgfs048.003 index d5fce80054..131398d546 100644 --- a/parm/wmo/grib2_awpgfs048.003 +++ b/parm/wmo/grib2_awpgfs048.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPI92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 48 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPI92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 48 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPI92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 48 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPI50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 48 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs054.003 b/parm/wmo/grib2_awpgfs054.003 index 92069b8110..1fa69592ea 100644 --- a/parm/wmo/grib2_awpgfs054.003 +++ b/parm/wmo/grib2_awpgfs054.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='ZRPM92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 54 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='ZUPM92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 54 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='ZVPM92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 54 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='ZHPM50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 54 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs060.003 b/parm/wmo/grib2_awpgfs060.003 index 6c5d7edf6c..a6bfd58475 100644 --- a/parm/wmo/grib2_awpgfs060.003 +++ b/parm/wmo/grib2_awpgfs060.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPJ92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 60 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPJ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 60 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPJ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 60 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPJ50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 60 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs066.003 b/parm/wmo/grib2_awpgfs066.003 index d322d35665..546db96456 100644 --- a/parm/wmo/grib2_awpgfs066.003 +++ b/parm/wmo/grib2_awpgfs066.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='ZRPN92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 66 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='ZUPN92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 66 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='ZVPN92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 66 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='ZHPN50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 66 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs072.003 b/parm/wmo/grib2_awpgfs072.003 index 3a7a4c48d8..fbab37a632 100644 --- a/parm/wmo/grib2_awpgfs072.003 +++ 
b/parm/wmo/grib2_awpgfs072.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPK92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 72 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPK92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 72 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPK92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 72 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPK50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 72 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs078.003 b/parm/wmo/grib2_awpgfs078.003 index 3556e82380..3b94cf4346 100644 --- a/parm/wmo/grib2_awpgfs078.003 +++ b/parm/wmo/grib2_awpgfs078.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='ZRPT92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 78 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='ZUPT92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 78 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='ZVPT92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 78 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='ZHPT50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 78 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs084.003 b/parm/wmo/grib2_awpgfs084.003 index 3f3d88eab2..20777aee0f 100644 --- a/parm/wmo/grib2_awpgfs084.003 +++ b/parm/wmo/grib2_awpgfs084.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPL92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 84 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPL92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 84 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPL92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 84 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPL50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 84 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs090.003 b/parm/wmo/grib2_awpgfs090.003 index dc606a19b2..8e1709bbf3 100644 --- a/parm/wmo/grib2_awpgfs090.003 +++ b/parm/wmo/grib2_awpgfs090.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='ZRPU92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 90 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='ZUPU92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 90 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='ZVPU92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 90 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='ZHPU50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 90 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs096.003 b/parm/wmo/grib2_awpgfs096.003 index 02adbff282..95e7388125 100644 --- a/parm/wmo/grib2_awpgfs096.003 +++ b/parm/wmo/grib2_awpgfs096.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPM92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 96 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPM92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 96 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPM92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 96 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPM50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 96 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs102.003 b/parm/wmo/grib2_awpgfs102.003 index 7cecc5b074..feab0338d4 100644 --- a/parm/wmo/grib2_awpgfs102.003 +++ b/parm/wmo/grib2_awpgfs102.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='ZRPV92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 102 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='ZUPV92 
KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 102 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='ZVPV92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 102 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='ZHPV50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 102 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs108.003 b/parm/wmo/grib2_awpgfs108.003 index 492e984f97..aa2a66f3a7 100644 --- a/parm/wmo/grib2_awpgfs108.003 +++ b/parm/wmo/grib2_awpgfs108.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPN92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 108 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPN92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 108 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPN92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 108 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPN50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 108 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs114.003 b/parm/wmo/grib2_awpgfs114.003 index 181cb2e21e..5f7606ddf4 100644 --- a/parm/wmo/grib2_awpgfs114.003 +++ b/parm/wmo/grib2_awpgfs114.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='ZRPW92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 114 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='ZUPW92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 114 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='ZVPW92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 114 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='ZHPW50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 114 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs120.003 b/parm/wmo/grib2_awpgfs120.003 index 976ce08024..a42df185ea 100644 --- a/parm/wmo/grib2_awpgfs120.003 +++ b/parm/wmo/grib2_awpgfs120.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPO92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 120 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPO92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 120 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPO92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 120 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPO50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 120 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs126.003 b/parm/wmo/grib2_awpgfs126.003 index 23742cdaf5..8f3f91c909 100644 --- a/parm/wmo/grib2_awpgfs126.003 +++ b/parm/wmo/grib2_awpgfs126.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='ZRPZ92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 126 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='ZUPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 126 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='ZVPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 126 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='ZHPZ50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 126 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs132.003 b/parm/wmo/grib2_awpgfs132.003 index 0556a4bc19..dfa0a4e8f6 100644 --- a/parm/wmo/grib2_awpgfs132.003 +++ b/parm/wmo/grib2_awpgfs132.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPP92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 132 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPP92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 132 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPP92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 132 100 0 92500 255 0 0 / 
-&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPP50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 132 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs138.003 b/parm/wmo/grib2_awpgfs138.003 index 98a5813a3a..8835cbfc72 100644 --- a/parm/wmo/grib2_awpgfs138.003 +++ b/parm/wmo/grib2_awpgfs138.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='ZRPZ92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 138 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='ZUPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 138 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='ZVPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 138 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='ZHPZ50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 138 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs144.003 b/parm/wmo/grib2_awpgfs144.003 index ba83e0134a..1ac08b6b5a 100644 --- a/parm/wmo/grib2_awpgfs144.003 +++ b/parm/wmo/grib2_awpgfs144.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPQ92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 144 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPQ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 144 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPQ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 144 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPQ50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 144 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs150.003 b/parm/wmo/grib2_awpgfs150.003 index 67fce65ebd..b2162cced0 100644 --- a/parm/wmo/grib2_awpgfs150.003 +++ b/parm/wmo/grib2_awpgfs150.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='ZRPZ92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 150 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='ZUPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 150 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='ZVPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 150 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='ZHPZ50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 150 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs156.003 b/parm/wmo/grib2_awpgfs156.003 index 1ca86738dd..c357fb88cc 100644 --- a/parm/wmo/grib2_awpgfs156.003 +++ b/parm/wmo/grib2_awpgfs156.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPR92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 156 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPR92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 156 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPR92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 156 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPR50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 156 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs162.003 b/parm/wmo/grib2_awpgfs162.003 index 3f6efb6e22..3cafb1e7b8 100644 --- a/parm/wmo/grib2_awpgfs162.003 +++ b/parm/wmo/grib2_awpgfs162.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='ZRPZ92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 162 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='ZUPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 162 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='ZVPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 162 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='ZHPZ50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 162 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs168.003 b/parm/wmo/grib2_awpgfs168.003 index 
2f9e5913d6..29c3278f35 100644 --- a/parm/wmo/grib2_awpgfs168.003 +++ b/parm/wmo/grib2_awpgfs168.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPS92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 168 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPS92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 168 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPS92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 168 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPS50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 168 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs174.003 b/parm/wmo/grib2_awpgfs174.003 index eb54a49f35..c6b9c97775 100644 --- a/parm/wmo/grib2_awpgfs174.003 +++ b/parm/wmo/grib2_awpgfs174.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='ZRPZ92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 174 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='ZUPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 174 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='ZVPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 174 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='ZHPZ50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 174 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs180.003 b/parm/wmo/grib2_awpgfs180.003 index 47670ca178..dce38df27f 100644 --- a/parm/wmo/grib2_awpgfs180.003 +++ b/parm/wmo/grib2_awpgfs180.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPT92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 180 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPT92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 180 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPT92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 180 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPT50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 180 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs186.003 b/parm/wmo/grib2_awpgfs186.003 index 10f3239a15..370b9ce6e8 100644 --- a/parm/wmo/grib2_awpgfs186.003 +++ b/parm/wmo/grib2_awpgfs186.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='ZRPZ92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 186 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='ZUPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 186 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='ZVPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 186 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='ZHPZ50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 186 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs192.003 b/parm/wmo/grib2_awpgfs192.003 index d3a9638d5e..0a5bd27aca 100644 --- a/parm/wmo/grib2_awpgfs192.003 +++ b/parm/wmo/grib2_awpgfs192.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPU92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 192 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPU92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 192 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPU92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 192 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPU50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 192 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs198.003 b/parm/wmo/grib2_awpgfs198.003 index 9b587fb043..87036abba8 100644 --- a/parm/wmo/grib2_awpgfs198.003 +++ b/parm/wmo/grib2_awpgfs198.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='ZRPZ92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 
198 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='ZUPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 198 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='ZVPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 198 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='ZHPZ50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 198 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs204.003 b/parm/wmo/grib2_awpgfs204.003 index 38551e7392..abf45424ea 100644 --- a/parm/wmo/grib2_awpgfs204.003 +++ b/parm/wmo/grib2_awpgfs204.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPV92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 204 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPV92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 204 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPV92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 204 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPV50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 204 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs210.003 b/parm/wmo/grib2_awpgfs210.003 index 45d617ec9b..74791a663f 100644 --- a/parm/wmo/grib2_awpgfs210.003 +++ b/parm/wmo/grib2_awpgfs210.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='ZRPZ92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 210 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='ZUPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 210 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='ZVPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 210 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='ZHPZ50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 210 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs216.003 b/parm/wmo/grib2_awpgfs216.003 index 16de54e624..dca67d2056 100644 --- a/parm/wmo/grib2_awpgfs216.003 +++ b/parm/wmo/grib2_awpgfs216.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPW92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 216 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPW92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 216 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPW92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 216 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPW50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 216 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs222.003 b/parm/wmo/grib2_awpgfs222.003 index e0d027c13d..d96242c1cb 100644 --- a/parm/wmo/grib2_awpgfs222.003 +++ b/parm/wmo/grib2_awpgfs222.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='ZRPZ92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 222 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='ZUPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 222 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='ZVPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 222 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='ZHPZ50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 222 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs228.003 b/parm/wmo/grib2_awpgfs228.003 index 0309033c14..22e419660c 100644 --- a/parm/wmo/grib2_awpgfs228.003 +++ b/parm/wmo/grib2_awpgfs228.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPX92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 228 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPX92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 228 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPX92 
KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 228 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPX50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 228 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs234.003 b/parm/wmo/grib2_awpgfs234.003 index 36df325d44..6a6a4a89d9 100644 --- a/parm/wmo/grib2_awpgfs234.003 +++ b/parm/wmo/grib2_awpgfs234.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='ZRPZ92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 234 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='ZUPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 234 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='ZVPZ92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 234 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='ZHPZ50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 234 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs240.003 b/parm/wmo/grib2_awpgfs240.003 index b55f8d2194..4fe553764e 100644 --- a/parm/wmo/grib2_awpgfs240.003 +++ b/parm/wmo/grib2_awpgfs240.003 @@ -106,4 +106,3 @@ &GRIBIDS DESC=' R H 925 mb ',WMOHEAD='YRPY92 KWBC',PDTN= 0 ,PDT= 1 1 2 0 96 0 0 1 240 100 0 92500 255 0 0 / &GRIBIDS DESC=' U GRD 925 mb ',WMOHEAD='YUPY92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 96 0 0 1 240 100 0 92500 255 0 0 / &GRIBIDS DESC=' V GRD 925 mb ',WMOHEAD='YVPY92 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 96 0 0 1 240 100 0 92500 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHPY50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 96 0 0 1 240 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs_20km_akf000 b/parm/wmo/grib2_awpgfs_20km_akf000 index 10205f0c3a..d44d417e36 100644 --- a/parm/wmo/grib2_awpgfs_20km_akf000 +++ b/parm/wmo/grib2_awpgfs_20km_akf000 @@ -244,4 +244,3 @@ &GRIBIDS DESC=' R H 2 m above ground ',WMOHEAD='YRBA98 KWBC',PDTN= 0 ,PDT= 1 1 2 0 81 0 0 1 0 103 0 2 255 0 0 / &GRIBIDS DESC=' U GRD 10 m above ground ',WMOHEAD='YUBA98 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 81 0 0 1 0 103 0 10 255 0 0 / &GRIBIDS DESC=' V GRD 10 m above ground ',WMOHEAD='YVBA98 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 81 0 0 1 0 103 0 10 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHBA50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 81 0 0 1 0 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs_20km_conusf000 b/parm/wmo/grib2_awpgfs_20km_conusf000 index 7f01e5c1e5..4eab5f3dbf 100644 --- a/parm/wmo/grib2_awpgfs_20km_conusf000 +++ b/parm/wmo/grib2_awpgfs_20km_conusf000 @@ -244,4 +244,3 @@ &GRIBIDS DESC=' R H 2 m above ground ',WMOHEAD='YRNA98 KWBC',PDTN= 0 ,PDT= 1 1 2 0 81 0 0 1 0 103 0 2 255 0 0 / &GRIBIDS DESC=' U GRD 10 m above ground ',WMOHEAD='YUNA98 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 81 0 0 1 0 103 0 10 255 0 0 / &GRIBIDS DESC=' V GRD 10 m above ground ',WMOHEAD='YVNA98 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 81 0 0 1 0 103 0 10 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHNA50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 81 0 0 1 0 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs_20km_pacf000 b/parm/wmo/grib2_awpgfs_20km_pacf000 index c7fa00a405..f7d8962a07 100644 --- a/parm/wmo/grib2_awpgfs_20km_pacf000 +++ b/parm/wmo/grib2_awpgfs_20km_pacf000 @@ -244,4 +244,3 @@ &GRIBIDS DESC=' R H 2 m above ground ',WMOHEAD='YREA98 KWBC',PDTN= 0 ,PDT= 1 1 2 0 81 0 0 1 0 103 0 2 255 0 0 / &GRIBIDS DESC=' U GRD 10 m above ground ',WMOHEAD='YUEA98 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 81 0 0 1 0 103 0 10 255 0 0 / &GRIBIDS DESC=' V GRD 10 m above ground ',WMOHEAD='YVEA98 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 81 0 0 1 0 103 0 10 255 0 0 / 
-&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHEA50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 81 0 0 1 0 100 0 50000 255 0 0 / diff --git a/parm/wmo/grib2_awpgfs_20km_pricof000 b/parm/wmo/grib2_awpgfs_20km_pricof000 index 9176d315d0..30a4ab9c97 100644 --- a/parm/wmo/grib2_awpgfs_20km_pricof000 +++ b/parm/wmo/grib2_awpgfs_20km_pricof000 @@ -244,4 +244,3 @@ &GRIBIDS DESC=' R H 2 m above ground ',WMOHEAD='YRFA98 KWBC',PDTN= 0 ,PDT= 1 1 2 0 81 0 0 1 0 103 0 2 255 0 0 / &GRIBIDS DESC=' U GRD 10 m above ground ',WMOHEAD='YUFA98 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 2 2 0 81 0 0 1 0 103 0 10 255 0 0 / &GRIBIDS DESC=' V GRD 10 m above ground ',WMOHEAD='YVFA98 KWBC',EXTRACT=.true.,PDTN= 0 ,PDT= 2 3 2 0 81 0 0 1 0 103 0 10 255 0 0 / -&GRIBIDS DESC=' 5WAVH 500 mb ',WMOHEAD='YHFA50 KWBC',PDTN= 0 ,PDT= 3 193 2 0 81 0 0 1 0 100 0 50000 255 0 0 / diff --git a/scripts/exgfs_atmos_awips_20km_1p0deg.sh b/scripts/exgfs_atmos_awips_20km_1p0deg.sh index 4959bbd8e8..ba0c7f64b6 100755 --- a/scripts/exgfs_atmos_awips_20km_1p0deg.sh +++ b/scripts/exgfs_atmos_awips_20km_1p0deg.sh @@ -174,11 +174,6 @@ for GRID in conus ak prico pac 003; do export FORT51="grib2.awpgfs${fcsthrs}.${GRID}" cp "${PARMgfs}/wmo/grib2_awpgfs${fcsthrs}.${GRID}" "parm_list" - if [[ ${DO_WAVE} != "YES" ]]; then - # Remove wave field it not running wave model - grep -vw "5WAVH" "parm_list" > "parm_list_temp" - mv "parm_list_temp" "parm_list" - fi ${TOCGRIB2} < "parm_list" >> "${pgmout}" 2> errfile export err=$?; err_chk @@ -208,11 +203,6 @@ for GRID in conus ak prico pac 003; do export FORT51="grib2.awpgfs_20km_${GRID}_f${fcsthrs}" cp "${PARMgfs}/wmo/grib2_awpgfs_20km_${GRID}f${fcsthrs}" "parm_list" - if [[ ${DO_WAVE} != "YES" ]]; then - # Remove wave field it not running wave model - grep -vw "5WAVH" "parm_list" > "parm_list_temp" - mv "parm_list_temp" "parm_list" - fi ${TOCGRIB2} < "parm_list" >> "${pgmout}" 2> errfile export err=$?; err_chk || exit "${err}" From db46d16fe3208421cccd4bfde09be5f4aca3cc9f Mon Sep 17 00:00:00 2001 From: RussTreadon-NOAA <26926959+RussTreadon-NOAA@users.noreply.github.com> Date: Wed, 8 Jan 2025 09:54:38 -0500 Subject: [PATCH 15/33] Remove cpus-per-task from APRUN_OCNANALECEN on WCOSS2 (#3212) Job gdas_ocnanalecen fails on WCOSS2 when running g-w CI C48mx500_hybAOWCDA. The failure is due to an invalid option on the `mpiexec` line. This PR removes `--cpus-per-task` from `WCOSS2.env`. Resolves #3158 --- env/WCOSS2.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/env/WCOSS2.env b/env/WCOSS2.env index e787202d66..fe6acf88fb 100755 --- a/env/WCOSS2.env +++ b/env/WCOSS2.env @@ -123,7 +123,7 @@ elif [[ "${step}" = "marineanlchkpt" ]]; then elif [[ "${step}" = "ocnanalecen" ]]; then export NTHREADS_OCNANALECEN=${NTHREADSmax} - export APRUN_OCNANALECEN="${APRUN_default} --cpus-per-task=${NTHREADS_OCNANALECEN}" + export APRUN_OCNANALECEN="${APRUN_default}" elif [[ "${step}" = "marineanlletkf" ]]; then From 673470a3362c2c498f38b4545d76cf6d8f76887e Mon Sep 17 00:00:00 2001 From: RussTreadon-NOAA <26926959+RussTreadon-NOAA@users.noreply.github.com> Date: Wed, 8 Jan 2025 11:03:21 -0500 Subject: [PATCH 16/33] Update gsi_enkf hash and gsi_ver (#3207) This PR updates the `sorc/gsi_enkf.fd` hash. `gsi_ver` is updated to be consistent with the `gsi_enkf.fd` hash. 
Resolves #3027 --- sorc/gsi_enkf.fd | 2 +- versions/fix.ver | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sorc/gsi_enkf.fd b/sorc/gsi_enkf.fd index 9f44c8798c..27c03e83a6 160000 --- a/sorc/gsi_enkf.fd +++ b/sorc/gsi_enkf.fd @@ -1 +1 @@ -Subproject commit 9f44c8798c2087aca06df8f629699632e57df431 +Subproject commit 27c03e83a66fa2ba3b32781dab7cd6a8c0fc497d diff --git a/versions/fix.ver b/versions/fix.ver index 991e0ce13a..db54701e77 100644 --- a/versions/fix.ver +++ b/versions/fix.ver @@ -15,7 +15,7 @@ export gdas_obs_ver=20240213 export gdas_aero_ver=20240806 export gdas_snow_ver=20241210 export glwu_ver=20220805 -export gsi_ver=20240208 +export gsi_ver=20241022 export lut_ver=20220805 export mom6_ver=20240416 export orog_ver=20231027 From bb029b52540559d494bbdf4f69d5e86a1c35f4d7 Mon Sep 17 00:00:00 2001 From: Neil Barton <103681022+NeilBarton-NOAA@users.noreply.github.com> Date: Wed, 8 Jan 2025 12:40:50 -0500 Subject: [PATCH 17/33] Separate use of initial increment/perturbation file from REPLAY/+03 ICs (#3119) This PR is a continuation of the previous SFS PR. The use of initial perturbations/increments for ensemble long forecasts (e.g., SFS and GEFS) was missing from the previous PR because this option was included with the REPLAY_IC option. In this PR, the REPLAY_IC option is now strictly for the +03 ICs, and the variables USE_ATM_ENS_PERTURB_FILES and USE_OCN_ENS_PERTURB_FILES are used for initialization with the initial perturbation/increment files used for SFS and GEFS reforecasting. This separation will also make it easier to remove the +03 IC option. --------- Co-authored-by: Eric.Sinsky Co-authored-by: Walter Kolczynski - NOAA Co-authored-by: Rahul Mahajan Co-authored-by: Xiaqiong.Zhou --- ci/cases/yamls/gefs_defaults_ci.yaml | 1 - ci/cases/yamls/sfs_defaults.yaml | 7 +++---- parm/config/gefs/config.base | 5 +++++ parm/config/gefs/config.efcs | 2 +- parm/config/gefs/config.fcst | 3 ++- parm/config/gefs/config.nsst | 4 ---- parm/config/gefs/config.resources | 2 +- parm/config/gefs/config.stage_ic | 4 ---- parm/config/gefs/config.ufs | 4 ++-- parm/config/gefs/yaml/defaults.yaml | 6 ++---- parm/config/gfs/config.fcst | 2 ++ ush/forecast_det.sh | 1 + ush/forecast_postdet.sh | 14 +++++++++----- ush/forecast_predet.sh | 16 ++++++++-------- 14 files changed, 36 insertions(+), 35 deletions(-) diff --git a/ci/cases/yamls/gefs_defaults_ci.yaml b/ci/cases/yamls/gefs_defaults_ci.yaml index a06aed638a..a19be7713f 100644 --- a/ci/cases/yamls/gefs_defaults_ci.yaml +++ b/ci/cases/yamls/gefs_defaults_ci.yaml @@ -4,7 +4,6 @@ base: ACCOUNT: {{ 'HPC_ACCOUNT' | getenv }} SFS_POST: "NO" FHOUT_GFS: 6 -stage_ic: USE_OCN_ENS_PERTURB_FILES: "NO" USE_ATM_ENS_PERTURB_FILES: "NO" ocn: diff --git a/ci/cases/yamls/sfs_defaults.yaml b/ci/cases/yamls/sfs_defaults.yaml index b1de60ce71..43f27d9239 100644 --- a/ci/cases/yamls/sfs_defaults.yaml +++ b/ci/cases/yamls/sfs_defaults.yaml @@ -7,7 +7,7 @@ base: DO_BUFRSND: "NO" DO_GEMPAK: "NO" DO_AWIPS: "NO" - KEEPDATA: "YES" + KEEPDATA: "NO" DO_EXTRACTVARS: "NO" FHMAX_GFS: 2976 FHMAX_HF_GFS: 0 @@ -17,6 +17,8 @@ base: FHOUT_ICE_GFS: 24 FCST_BREAKPOINTS: "" REPLAY_ICS: "NO" + USE_OCN_ENS_PERTURB_FILES: "YES" + USE_ATM_ENS_PERTURB_FILES: "YES" HPSSARCH: "NO" LOCALARCH: "NO" SFS_POST: "YES" @@ -26,8 +28,5 @@ fcst: MONO: "mono" reforecast: "YES" FHZER: 24 -stage_ic: - USE_OCN_ENS_PERTURB_FILES: "YES" - USE_ATM_ENS_PERTURB_FILES: "YES" ocn: MOM6_INTERP_ICS: "YES" diff --git a/parm/config/gefs/config.base b/parm/config/gefs/config.base index
8d5852a15b..fc641fef6b 100644 --- a/parm/config/gefs/config.base +++ b/parm/config/gefs/config.base @@ -227,8 +227,13 @@ export SDATE_GFS=@SDATE_GFS@ export REPLAY_ICS=@REPLAY_ICS@ if [[ "${REPLAY_ICS:-NO}" == "YES" ]]; then export OFFSET_START_HOUR=$(( assim_freq / 2 )) + echo "WARNING: Replay ICs require perturbation files, ignoring any previous settings" + export USE_OCN_ENS_PERTURB_FILES="YES" + export USE_ATM_ENS_PERTURB_FILES="YES" else export OFFSET_START_HOUR=0 + export USE_OCN_ENS_PERTURB_FILES=@USE_OCN_ENS_PERTURB_FILES@ + export USE_ATM_ENS_PERTURB_FILES=@USE_ATM_ENS_PERTURB_FILES@ fi # GFS output and frequency diff --git a/parm/config/gefs/config.efcs b/parm/config/gefs/config.efcs index d1f7f7f714..9ebf21ce1e 100644 --- a/parm/config/gefs/config.efcs +++ b/parm/config/gefs/config.efcs @@ -57,7 +57,7 @@ if (( OCNRES <= 100 )); then export EPBL_LSCALE="500.E3,1000.E3,2000.E3,2000.E3,2000.E3" fi -if [[ "${REPLAY_ICS:-NO}" == "YES" ]]; then +if [[ "${USE_OCN_ENS_PERTURB_FILES:-NO}" == "YES" ]]; then export ODA_INCUPD="True" export ODA_TEMPINC_VAR='t_pert' export ODA_SALTINC_VAR='s_pert' diff --git a/parm/config/gefs/config.fcst b/parm/config/gefs/config.fcst index 43e2724dc6..2fef1ba0a0 100644 --- a/parm/config/gefs/config.fcst +++ b/parm/config/gefs/config.fcst @@ -192,6 +192,7 @@ case ${imp_physics} in export hord_mt_nh_nonmono=5 export hord_xx_nh_nonmono=5 + export hord_dp_nh_nonmono=-5 export vtdm4_nh_nonmono=0.02 export nord=2 if [[ "${TYPE}" == "nh"* ]]; then @@ -199,7 +200,6 @@ case ${imp_physics} in else export dddmp=0. fi - export dddmp=0.1 export d4_bg=0.12 ;; 11) # GFDL @@ -216,6 +216,7 @@ case ${imp_physics} in export hord_mt_nh_nonmono=5 export hord_xx_nh_nonmono=5 + export hord_dp_nh_nonmono=-5 export vtdm4_nh_nonmono=0.02 export nord=2 export d4_bg=0.12 diff --git a/parm/config/gefs/config.nsst b/parm/config/gefs/config.nsst index db4367b2c0..f9a29d44d3 100644 --- a/parm/config/gefs/config.nsst +++ b/parm/config/gefs/config.nsst @@ -12,10 +12,6 @@ export NST_MODEL=2 # nstf_name(2) : NST_SPINUP : 0 = OFF, 1 = ON, export NST_SPINUP=0 -cdate="${PDY}${cyc}" -if (( cdate < 2017072000 )); then - export NST_SPINUP=1 -fi # nstf_name(3) : NST_RESV (Reserved, NSST Analysis) : 0 = OFF, 1 = ON export NST_RESV=0 diff --git a/parm/config/gefs/config.resources b/parm/config/gefs/config.resources index 68f81c1039..17033858c8 100644 --- a/parm/config/gefs/config.resources +++ b/parm/config/gefs/config.resources @@ -219,7 +219,7 @@ case ${step} in case "${CASE}" in "C48" | "C96" | "C192") - declare -x "walltime"="03:00:00" + declare -x "walltime"="04:00:00" ;; "C384" | "C768" | "C1152") declare -x "walltime"="06:00:00" diff --git a/parm/config/gefs/config.stage_ic b/parm/config/gefs/config.stage_ic index 5822f2e794..8cd97b58ac 100644 --- a/parm/config/gefs/config.stage_ic +++ b/parm/config/gefs/config.stage_ic @@ -32,8 +32,4 @@ if [[ -z "${ICSDIR}" ]] ; then fi -#use of perturbations files for ensembles -export USE_OCN_ENS_PERTURB_FILES=@USE_OCN_ENS_PERTURB_FILES@ -export USE_ATM_ENS_PERTURB_FILES=@USE_ATM_ENS_PERTURB_FILES@ - echo "END: config.stage_ic" diff --git a/parm/config/gefs/config.ufs b/parm/config/gefs/config.ufs index c46023aff6..3f931d7c0d 100644 --- a/parm/config/gefs/config.ufs +++ b/parm/config/gefs/config.ufs @@ -99,8 +99,8 @@ case "${fv3_res}" in export DELTIM=600 export layout_x=2 export layout_y=2 - export layout_x_gfs=4 - export layout_y_gfs=4 + export layout_x_gfs=6 + export layout_y_gfs=8 export nthreads_fv3=1 export nthreads_fv3_gfs=1 export nthreads_ufs=1 
diff --git a/parm/config/gefs/yaml/defaults.yaml b/parm/config/gefs/yaml/defaults.yaml index 48cf912dcb..df12b16282 100644 --- a/parm/config/gefs/yaml/defaults.yaml +++ b/parm/config/gefs/yaml/defaults.yaml @@ -14,22 +14,20 @@ base: FHOUT_HF_GFS: 1 FCST_BREAKPOINTS: "48" REPLAY_ICS: "NO" - USE_OCN_PERTURB_FILES: "false" FHOUT_GFS: 6 FHOUT_OCN_GFS: 6 FHOUT_ICE_GFS: 6 HPSSARCH: "NO" LOCALARCH: "NO" SFS_POST: "NO" + USE_OCN_ENS_PERTURB_FILES: "NO" + USE_ATM_ENS_PERTURB_FILES: "NO" DO_TEST_MODE: "NO" fcst: reforecast: "NO" FHZER: 6 TYPE: "nh" MONO: "non-mono" -stage_ic: - USE_OCN_ENS_PERTURB_FILES: "NO" - USE_ATM_ENS_PERTURB_FILES: "NO" ocn: MOM6_INTERP_ICS: "NO" # config.aero has just a system-specific path to add. diff --git a/parm/config/gfs/config.fcst b/parm/config/gfs/config.fcst index 5be4f4eca7..992f9eb329 100644 --- a/parm/config/gfs/config.fcst +++ b/parm/config/gfs/config.fcst @@ -203,6 +203,7 @@ case ${imp_physics} in export hord_mt_nh_nonmono=5 export hord_xx_nh_nonmono=5 + export hord_dp_nh_nonmono=-5 export vtdm4_nh_nonmono=0.02 export nord=2 export dddmp=0.1 @@ -236,6 +237,7 @@ case ${imp_physics} in export hord_mt_nh_nonmono=5 export hord_xx_nh_nonmono=5 + export hord_dp_nh_nonmono=-5 export vtdm4_nh_nonmono=0.02 export nord=2 export d4_bg=0.12 diff --git a/ush/forecast_det.sh b/ush/forecast_det.sh index 6d321aa620..9854494859 100755 --- a/ush/forecast_det.sh +++ b/ush/forecast_det.sh @@ -86,6 +86,7 @@ UFS_det(){ MOM6_INIT_FROM_Z=True MOM6_WARMSTART_FILE="none" MOM6_INIT_UV="zero" + ODA_INCUPD="False" fi # Check for CICE6 restart availability diff --git a/ush/forecast_postdet.sh b/ush/forecast_postdet.sh index 25cd4d36f0..64cb14a3ec 100755 --- a/ush/forecast_postdet.sh +++ b/ush/forecast_postdet.sh @@ -98,8 +98,10 @@ FV3_postdet() { # Determine increment files when doing cold start if [[ "${warm_start}" == ".false." ]]; then - if [[ "${REPLAY_ICS:-NO}" == "YES" ]]; then - IAU_FHROT=${half_window} # Replay ICs start at the end of the assimilation window + if [[ "${USE_ATM_ENS_PERTURB_FILES:-NO}" == "YES" ]]; then + if [[ "${REPLAY_ICS:-NO}" == "YES" ]]; then + IAU_FHROT=${half_window} # Replay ICs start at the end of the assimilation window + fi if (( MEMBER == 0 )); then inc_files=() else @@ -109,7 +111,7 @@ FV3_postdet() { fi local increment_file for inc_file in "${inc_files[@]}"; do - increment_file="${COMIN_ATMOS_INPUT}/${RUN}.t${cyc}z.${inc_file}" + increment_file="${COMIN_ATMOS_ANALYSIS}/${RUN}.t${cyc}z.${inc_file}" if [[ -f "${increment_file}" ]]; then ${NCP} "${increment_file}" "${DATA}/INPUT/${inc_file}" else @@ -173,8 +175,10 @@ EOF inc_files=("atminc.nc") read_increment=".true." 
res_latlon_dynamics="atminc.nc" - if [[ "${REPLAY_ICS:-NO}" == "YES" ]]; then - IAU_FHROT=${half_window} # Replay ICs start at the end of the assimilation window + if [[ "${USE_ATM_ENS_PERTURB_FILES:-NO}" == "YES" ]]; then + if [[ "${REPLAY_ICS:-NO}" == "YES" ]]; then + IAU_FHROT=${half_window} # Replay ICs start at the end of the assimilation window + fi # Control member has no perturbation if (( MEMBER == 0 )); then inc_files=() diff --git a/ush/forecast_predet.sh b/ush/forecast_predet.sh index 14f32378c3..2b730fa7d6 100755 --- a/ush/forecast_predet.sh +++ b/ush/forecast_predet.sh @@ -351,11 +351,15 @@ FV3_predet(){ if [[ "${TYPE}" == "nh" ]]; then # monotonic and non-hydrostatic hord_mt=${hord_mt_nh_mono:-"10"} hord_xx=${hord_xx_nh_mono:-"10"} - hord_dp=-${hord_xx_nh_nonmono:-"-10"} + hord_dp=${hord_xx_nh_mono:-"10"} else # monotonic and hydrostatic hord_mt=${hord_mt_hydro_mono:-"10"} hord_xx=${hord_xx_hydro_mono:-"10"} - hord_dp=-${hord_xx_nh_nonmono:-"-10"} + hord_dp=${hord_xx_hydro_mono:-"10"} + kord_tm=${kord_tm_hydro_mono:-"-12"} + kord_mt=${kord_mt_hydro_mono:-"12"} + kord_wz=${kord_wz_hydro_mono:-"12"} + kord_tr=${kord_tr_hydro_mono:-"12"} fi else # non-monotonic options d_con=${d_con_nonmono:-"1."} @@ -363,15 +367,11 @@ FV3_predet(){ if [[ "${TYPE}" == "nh" ]]; then # non-monotonic and non-hydrostatic hord_mt=${hord_mt_nh_nonmono:-"5"} hord_xx=${hord_xx_nh_nonmono:-"5"} - hord_dp=${hord_xx_hydro_mono:-"-5"} + hord_dp=${hord_dp_nh_nonmono:-"-5"} else # non-monotonic and hydrostatic hord_mt=${hord_mt_hydro_nonmono:-"10"} hord_xx=${hord_xx_hydro_nonmono:-"10"} - hord_dp=${hord_xx_hydro_mono:-"10"} - kord_tm=${kord_tm_hydro_mono:-"-12"} - kord_mt=${kord_mt_hydro_mono:-"12"} - kord_wz=${kord_wz_hydro_mono:-"12"} - kord_tr=${kord_tr_hydro_mono:-"12"} + hord_dp=${hord_xx_hydro_nonmono:-"10"} fi fi From ed955461b2b4a0d87f87397a37f967f8aae3919e Mon Sep 17 00:00:00 2001 From: CatherineThomas-NOAA <59020064+CatherineThomas-NOAA@users.noreply.github.com> Date: Fri, 10 Jan 2025 02:31:48 -0500 Subject: [PATCH 18/33] Update g-w to cycle with C1152 ATM (#3206) There are a few updates needed to cycle with the C1152 atmospheric model: - config.resources(.$machine): include C1152 case options for DA tasks - gdasfcst config.ufs: WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=20 - source code update for calc_analysis NOAA-EMC/GSI-utils#59 - resource change needed for atmanl upp Note that any needed changes to time limits for C1152 will be addressed at a later date. 
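
For reference, the `config.resources` changes below all follow the same case-statement pattern: C1152 is added to the existing C768 branch so both resolutions share a decomposition. A minimal sketch of that pattern (the layout values are the ones from the snowanl block and are illustrative only):

```shell
# Illustrative sketch of the recurring config.resources pattern in this PR,
# not new logic: C1152 joins the C768 branch and shares its JEDI layout.
case ${CASE} in
  "C1152" | "C768")
    layout_x=6
    layout_y=6
    ;;
  *)
    echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}"
    exit 4
    ;;
esac
```
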
Resolves #3173 --- parm/config/gfs/config.resources | 31 ++++++++++++++----------- parm/config/gfs/config.resources.GAEA | 2 +- parm/config/gfs/config.resources.HERA | 4 ++-- parm/config/gfs/config.resources.JET | 2 +- parm/config/gfs/config.resources.WCOSS2 | 6 ++--- parm/config/gfs/config.ufs | 2 +- sorc/gsi_utils.fd | 2 +- 7 files changed, 27 insertions(+), 22 deletions(-) diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources index eeb33716c0..5acc7e5620 100644 --- a/parm/config/gfs/config.resources +++ b/parm/config/gfs/config.resources @@ -324,7 +324,7 @@ case ${step} in "snowanl") # below lines are for creating JEDI YAML case ${CASE} in - "C768") + "C1152" | "C768") layout_x=6 layout_y=6 ;; @@ -353,7 +353,7 @@ case ${step} in "esnowanl") # below lines are for creating JEDI YAML case ${CASE} in - "C768") + "C1152" | "C768") layout_x=6 layout_y=6 ;; @@ -390,7 +390,7 @@ case ${step} in "aeroanlinit") # below lines are for creating JEDI YAML case ${CASE} in - "C768") + "C1152" | "C768") layout_x=8 layout_y=8 ;; @@ -423,7 +423,7 @@ case ${step} in "aeroanlvar") case ${CASE} in - "C768") + "C1152" | "C768") layout_x=8 layout_y=8 ;; @@ -457,7 +457,7 @@ case ${step} in "aeroanlgenb") case ${CASE} in - "C768") + "C1152" | "C768") layout_x=8 layout_y=8 ;; @@ -668,7 +668,7 @@ case ${step} in walltime_gdas="01:20:00" walltime_gfs="01:00:00" case ${CASE} in - "C768") + "C1152" | "C768") ntasks_gdas=780 ntasks_gfs=825 threads_per_task=5 @@ -704,7 +704,7 @@ case ${step} in export threads_per_task_echgres_gfs=12 export is_exclusive=True memory="48GB" - if [[ "${CASE}" == "C384" || "${CASE}" == "C768" ]]; then + if [[ "${CASE}" == "C384" || "${CASE}" == "C768" || "${CASE}" == "C1152" ]]; then memory="${mem_node_max}" fi ;; @@ -897,7 +897,7 @@ case ${step} in ;; "C768" | "C1152") # Not valid resolutions for ensembles - declare -x "walltime_gdas"="00:40:00" + declare -x "walltime_gdas"="00:50:00" declare -x "walltime_gfs"="06:00:00" ;; *) @@ -923,16 +923,21 @@ case ${step} in "C48" | "C96") ntasks=${CASE:1} ;; - "C192" | "C384" | "C768" ) + "C192" | "C384" | "C768") ntasks=120 memory="${mem_node_max}" ;; + "C1152") + ntasks=200 + memory="${mem_node_max}" + ;; *) echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}" exit 4 ;; esac tasks_per_node=${ntasks} + [[ ${CASE} == "C1152" ]] && tasks_per_node=40 threads_per_task=1 @@ -1005,7 +1010,7 @@ case ${step} in threads_per_task=1 tasks_per_node=1 memory="20G" - [[ ${CASE} == "C768" ]] && memory="80GB" + [[ ${CASE} == "C768" || ${CASE} == "C1152" ]] && memory="80GB" ;; "metp") @@ -1145,7 +1150,7 @@ case ${step} in fi case ${CASE} in - "C768") ntasks=200;; + "C1152" | "C768") ntasks=200;; "C384") ntasks=100;; "C192" | "C96" | "C48") ntasks=40;; *) @@ -1178,7 +1183,7 @@ case ${step} in "eupd") walltime="00:30:00" case ${CASE} in - "C768") + "C1152" | "C768") ntasks=480 threads_per_task=6 ;; @@ -1223,7 +1228,7 @@ case ${step} in "epos") walltime="00:15:00" - [[ ${CASE} == "C768" ]] && walltime="00:25:00" + [[ ${CASE} == "C768" || ${CASE} == "C1152" ]] && walltime="00:25:00" ntasks=80 threads_per_task=1 tasks_per_node=$(( max_tasks_per_node / threads_per_task )) diff --git a/parm/config/gfs/config.resources.GAEA b/parm/config/gfs/config.resources.GAEA index c50601da00..aa353b9302 100644 --- a/parm/config/gfs/config.resources.GAEA +++ b/parm/config/gfs/config.resources.GAEA @@ -12,7 +12,7 @@ case ${step} in # The number of tasks and cores used must be the same for eobs # See 
https://github.com/NOAA-EMC/global-workflow/issues/2092 for details case ${CASE} in - "C768" | "C384") + "C1152" | "C768" | "C384") export tasks_per_node=50 ;; *) diff --git a/parm/config/gfs/config.resources.HERA b/parm/config/gfs/config.resources.HERA index ac3067d9f9..0a190251b0 100644 --- a/parm/config/gfs/config.resources.HERA +++ b/parm/config/gfs/config.resources.HERA @@ -32,7 +32,7 @@ case ${step} in "eupd") case ${CASE} in - "C768") + "C1152" | "C768") export ntasks=80 export threads_per_task=20 ;; @@ -49,7 +49,7 @@ case ${step} in ;; "ecen") - if [[ "${CASE}" == "C768" ]]; then export threads_per_task=6; fi + if [[ "${CASE}" == "C768" || "${CASE}" == "C1152" ]]; then export threads_per_task=6; fi export tasks_per_node=$(( max_tasks_per_node / threads_per_task )) ;; diff --git a/parm/config/gfs/config.resources.JET b/parm/config/gfs/config.resources.JET index bbd308f439..93f64c2c5b 100644 --- a/parm/config/gfs/config.resources.JET +++ b/parm/config/gfs/config.resources.JET @@ -40,7 +40,7 @@ case ${step} in ;; "ecen") - if [[ "${CASE}" == "C768" ]]; then export threads_per_task=6; fi + if [[ "${CASE}" == "C768" || "${CASE}" == "C1152" ]]; then export threads_per_task=6; fi export tasks_per_node=$(( max_tasks_per_node / threads_per_task )) ;; diff --git a/parm/config/gfs/config.resources.WCOSS2 b/parm/config/gfs/config.resources.WCOSS2 index 3ff019068c..342286d008 100644 --- a/parm/config/gfs/config.resources.WCOSS2 +++ b/parm/config/gfs/config.resources.WCOSS2 @@ -9,7 +9,7 @@ case ${step} in ;; "anal") - if [[ "${CASE}" == "C768" ]]; then + if [[ "${CASE}" == "C768" || "${CASE}" == "C1152" ]]; then export threads_per_task=8 # Make ntasks a multiple of 16 export ntasks_gdas=784 @@ -43,7 +43,7 @@ case ${step} in "eupd") case ${CASE} in - "C768" | "C384") + "C1152" | "C768" | "C384") export ntasks=315 export threads_per_task=14 ;; @@ -55,7 +55,7 @@ case ${step} in "eobs") case ${CASE} in - "C768" | "C384") + "C1152" | "C768" | "C384") export tasks_per_node=50 ;; *) diff --git a/parm/config/gfs/config.ufs b/parm/config/gfs/config.ufs index 9737404dd1..0a16a75cb2 100644 --- a/parm/config/gfs/config.ufs +++ b/parm/config/gfs/config.ufs @@ -315,7 +315,7 @@ case "${fv3_res}" in export rf_cutoff=100.0 export fv_sg_adj=450 export WRITE_GROUP_GDAS=4 - export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=10 # TODO: refine these numbers when a case is available + export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GDAS=20 # TODO: refine these numbers when a case is available export WRITE_GROUP_GFS=4 export WRTTASK_PER_GROUP_PER_THREAD_PER_TILE_GFS=20 # TODO: refine these numbers when a case is available ;; diff --git a/sorc/gsi_utils.fd b/sorc/gsi_utils.fd index a6ea311e5c..f716012812 160000 --- a/sorc/gsi_utils.fd +++ b/sorc/gsi_utils.fd @@ -1 +1 @@ -Subproject commit a6ea311e5c82369d255e3afdc99c1bce0c9a3014 +Subproject commit f716012812c2564e7eab24041f7a3ec14c7aa383 From b850b102b98df35a15e6da171d69ee171dbddfd2 Mon Sep 17 00:00:00 2001 From: David Huber <69919478+DavidHuber-NOAA@users.noreply.github.com> Date: Fri, 10 Jan 2025 14:31:37 +0000 Subject: [PATCH 19/33] Prevent duplicate case generation in generate_workflows.sh (#3217) This prevents generate_workflows.sh from generating multiple, identical test cases. 
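
With this guard in place, the run-all flags and an explicit YAML list are mutually exclusive. Illustrative invocations (case names are the examples from the usage text):

```shell
# Unambiguous: run every valid GFS case found in the YAML directory
./generate_workflows.sh -G

# Ambiguous: an explicit list combined with -G now fails with
# "Ambiguous case selection." and exit code 3
./generate_workflows.sh -y "C48_ATM C48_S2SW" -G
```
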
Resolves #3213 --- workflow/generate_workflows.sh | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/workflow/generate_workflows.sh b/workflow/generate_workflows.sh index a5615a8b0d..dbd360fda2 100755 --- a/workflow/generate_workflows.sh +++ b/workflow/generate_workflows.sh @@ -17,13 +17,13 @@ function _usage() { directory up from this script's residing directory. -b Run build_all.sh with default flags - (build the UFS, UPP, UFS_Utils, and GFS-utils only + (build the UFS, UPP, UFS_Utils, and GFS-utils only) -u Update submodules before building and/or generating experiments. -y "list of YAMLs to run" If this option is not specified, the default case (C48_ATM) will be - run. This option is overidden by -G or -E (see below). + run. This option is incompatible with -G, -E, or -S. Example: -y "C48_ATM C48_S2SW C96C48_hybatmDA" -Y /path/to/directory/with/YAMLs @@ -43,7 +43,6 @@ function _usage() { Run all valid SFS cases in the specified YAML directory. NOTES: - - Only one of -G -E or -S may be specified - Valid cases are determined by the experiment:system key as well as the skip_ci_on_hosts list in each YAML. @@ -223,6 +222,22 @@ else done fi +# Empty the _yaml_list array if -G, -E, and/or -S were selected +if [[ "${_run_all_gfs}" == "true" || \ + "${_run_all_gefs}" == "true" || \ + "${_run_all_sfs}" == "true" ]]; then + + # Raise an error if the user specified a yaml list and any of -G -E -S + if [[ "${_specified_yaml_list}" == "true" ]]; then + echo "Ambiguous case selection." + echo "Please select which tests to run explicitly with -y \"list of tests\" or" + echo "by specifying -G (all GFS), -E (all GEFS), and/or -S (all SFS), but not both." + exit 3 + fi + + _yaml_list=() +fi + # If -S is specified, exit (for now). # TODO when SFS tests come online, enable this option. if [[ "${_run_all_sfs}" == "true" ]]; then From fea98f9b572e1ab2ceffab7a06d0bcc0298d59cf Mon Sep 17 00:00:00 2001 From: Walter Kolczynski - NOAA Date: Fri, 10 Jan 2025 09:32:21 -0500 Subject: [PATCH 20/33] Update upload-artifact to v4 (#3216) Updates the version of the GH marketplace action upload-artifact from v3 to v4. v3 is deprecated and will be unavailable after 2025 January 30. 
Resolves #3073 --- .github/workflows/docs.yaml | 4 ++-- .github/workflows/linters.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 89b5fb617a..59691b1bd4 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -42,14 +42,14 @@ jobs: ./.github/scripts/build_docs.sh - name: Upload documentation (on success) - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: documentation path: artifact/documentation - name: Upload warnings (on failure) - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: failure() with: name: documentation_warnings.log diff --git a/.github/workflows/linters.yaml b/.github/workflows/linters.yaml index 7816788b81..4987c8caf0 100644 --- a/.github/workflows/linters.yaml +++ b/.github/workflows/linters.yaml @@ -31,7 +31,7 @@ jobs: - if: ${{ always() }} name: Upload artifact with ShellCheck defects in SARIF format - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Differential ShellCheck SARIF path: ${{ steps.ShellCheck.outputs.sarif }} From 26fb85098b044c9eda2d6fa6432d357f58c9c413 Mon Sep 17 00:00:00 2001 From: RussTreadon-NOAA <26926959+RussTreadon-NOAA@users.noreply.github.com> Date: Mon, 13 Jan 2025 23:26:08 -0500 Subject: [PATCH 21/33] Update the gdas.cd hash and enable GDASApp to run on WCOSS2 (#3220) This PR does the following: 1. update the `sorc/gdas.cd` hash to bring new GDASApp functionality into g-w 2. update `env/WCOSS2.env` 3. update the WCOSS2 section of `ush/module-setup.sh` The change to `WCOSS2.env` is due to changes introduced during the fall 2024 WCOSS2 upgrade. The change to `module-setup.sh` is required when using spack-stack on WCOSS2. 
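
As a sanity check after these changes, one might confirm on WCOSS2 that Lmod prefers the first module found and that the temporary library paths are present. This is only a suggested verification sketch, not part of the PR:

```shell
# Suggested verification sketch (assumption: run on WCOSS2 with this PR applied)
export LMOD_TMOD_FIND_FIRST=yes   # prefer the first module found in the search path
module reset

# The two temporary LD_LIBRARY_PATH patches should expose the GDASApp build
# libraries and cray-mpich at run time
echo "${LD_LIBRARY_PATH}" | tr ':' '\n' | grep -E 'gdas\.cd/build/lib|cray/pe/mpich'
```
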
Resolves #3219 Resolves #3100 --- env/WCOSS2.env | 5 +++++ sorc/gdas.cd | 2 +- ush/module-setup.sh | 2 ++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/env/WCOSS2.env b/env/WCOSS2.env index fe6acf88fb..6c476cda5d 100755 --- a/env/WCOSS2.env +++ b/env/WCOSS2.env @@ -13,6 +13,11 @@ step=$1 export launcher="mpiexec -l" export mpmd_opt="--cpu-bind verbose,core cfp" +# TODO: Add path to GDASApp libraries and cray-mpich as temporary patches +# TODO: Remove LD_LIBRARY_PATH lines as soon as permanent solutions are available +export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${HOMEgfs}/sorc/gdas.cd/build/lib" +export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/opt/cray/pe/mpich/8.1.19/ofi/intel/19.0/lib" + # Calculate common resource variables # Check first if the dependent variables are set if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-}" ]]; then diff --git a/sorc/gdas.cd b/sorc/gdas.cd index d6097afdd4..15113adfdb 160000 --- a/sorc/gdas.cd +++ b/sorc/gdas.cd @@ -1 +1 @@ -Subproject commit d6097afdd435fe73cc99d8ddb594c3143b72820a +Subproject commit 15113adfdbf2500ec2d5099fc9b62b21fbfcc9b8 diff --git a/ush/module-setup.sh b/ush/module-setup.sh index 398562652d..366286d142 100755 --- a/ush/module-setup.sh +++ b/ush/module-setup.sh @@ -51,6 +51,8 @@ elif [[ ${MACHINE_ID} = s4* ]] ; then elif [[ ${MACHINE_ID} = wcoss2 ]]; then # We are on WCOSS2 + # Ignore default modules of the same version lower in the search path (req'd by spack-stack) + export LMOD_TMOD_FIND_FIRST=yes module reset elif [[ ${MACHINE_ID} = cheyenne* ]] ; then From 53ed76ecefa0c69abf17ccf909b2d3da6db35922 Mon Sep 17 00:00:00 2001 From: Kate Friedman Date: Tue, 14 Jan 2025 09:11:08 -0500 Subject: [PATCH 22/33] Turn IAU off during staging job for cold start experiments (#3215) Resolve a bug related to IAU when cold-starting an experiment. The model start time for non-atmos ICs was incorrectly being reduced by 3 hours for cold-started experiments. While a user may want IAU on (`DOIAU=YES`), the IAU switch should be set to "NO" for the staging job when it is cold-started. Resolves #2890 Resolves #2865 --- parm/config/gefs/config.stage_ic | 4 ++++ parm/config/gfs/config.stage_ic | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/parm/config/gefs/config.stage_ic b/parm/config/gefs/config.stage_ic index 8cd97b58ac..0dd76679a2 100644 --- a/parm/config/gefs/config.stage_ic +++ b/parm/config/gefs/config.stage_ic @@ -14,6 +14,10 @@ export STAGE_IC_YAML_TMPL="${PARMgfs}/stage/master_gefs.yaml.j2" source "${HOMEgfs}/versions/ic.ver" +if [[ ${EXP_WARM_START} = ".false." ]] ; then + export DOIAU="NO" # Turn off for staging +fi + # Set ICSDIR (if not defined) if [[ -z "${ICSDIR}" ]] ; then diff --git a/parm/config/gfs/config.stage_ic b/parm/config/gfs/config.stage_ic index f30e402c93..bb12cd3ba1 100644 --- a/parm/config/gfs/config.stage_ic +++ b/parm/config/gfs/config.stage_ic @@ -14,6 +14,10 @@ export STAGE_IC_YAML_TMPL="${PARMgfs}/stage/master_gfs.yaml.j2" source "${HOMEgfs}/versions/ic.ver" +if [[ ${EXP_WARM_START} = ".false." ]] ; then + export DOIAU="NO" # Turn off for staging +fi + # Set ICSDIR (if not defined) if [[ -z "${ICSDIR}" ]] ; then From aea82a8783e8aa3cdc17d502a66378a038d049e4 Mon Sep 17 00:00:00 2001 From: David Huber <69919478+DavidHuber-NOAA@users.noreply.github.com> Date: Tue, 14 Jan 2025 17:02:34 +0000 Subject: [PATCH 23/33] Additional fixes for downstream jobs (#3187) This fixes a number of issues with the `postsnd` and `arch` jobs discovered during testing of HR4.
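
Many of the GEMPAK-related changes below are a mechanical rename from the generic `COM_*` variables to direction-specific `COMIN_*`/`COMOUT_*` names. The recurring `declare_from_tmpl` pattern, in miniature (lines taken from the diffs that follow):

```shell
# Input: GEMPAK 1p00 files are read from an explicitly declared COMIN_* location
GRID=1p00 YMD=${PDY} HH=${cyc} declare_from_tmpl -rx "COMIN_ATMOS_GEMPAK_1p00:COM_ATMOS_GEMPAK_TMPL"

# Output: meta files go to an explicitly declared COMOUT_* location, created if missing
GRID="meta" YMD=${PDY} HH=${cyc} declare_from_tmpl -rx "COMOUT_ATMOS_GEMPAK_META:COM_ATMOS_GEMPAK_TMPL"
if [[ ! -d "${COMOUT_ATMOS_GEMPAK_META}" ]]; then
  mkdir -m 775 -p "${COMOUT_ATMOS_GEMPAK_META}"
fi
```
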
Resolves #3019 --------- Co-authored-by: Rahul Mahajan --- .gitignore | 4 - ci/cases/pr/C48_S2SW_extended.yaml | 21 ++ ci/cases/yamls/gfs_extended_ci.yaml | 2 +- env/HERA.env | 5 - gempak/ush/gdas_ecmwf_meta_ver.sh | 8 +- gempak/ush/gdas_meta_loop.sh | 12 +- gempak/ush/gdas_meta_na.sh | 8 +- gempak/ush/gdas_ukmet_meta_ver.sh | 8 +- gempak/ush/gempak_gdas_f000_gif.sh | 44 ++-- gempak/ush/gempak_gfs_f000_gif.sh | 66 ++--- gempak/ush/gempak_gfs_fhhh_gif.sh | 18 +- jobs/JGDAS_ATMOS_GEMPAK_META_NCDC | 14 +- jobs/JGFS_ATMOS_GEMPAK_NCDC_UPAPGIF | 13 +- jobs/JGLOBAL_WAVE_GEMPAK | 6 +- modulefiles/module_base.wcoss2.lua | 1 + parm/archive/gfs_downstream.yaml.j2 | 2 + parm/config/gfs/config.resources | 1 + parm/config/gfs/config.waveawipsbulls | 2 - parm/config/gfs/config.waveawipsgridded | 8 +- scripts/exgdas_atmos_gempak_gif_ncdc.sh | 2 +- scripts/exgfs_atmos_gempak_gif_ncdc_skew_t.sh | 12 +- scripts/exgfs_atmos_postsnd.sh | 2 +- scripts/exgfs_wave_nawips.sh | 16 +- scripts/exgfs_wave_prdgen_bulls.sh | 12 +- scripts/exgfs_wave_prdgen_gridded.sh | 151 ++++++----- scripts/exglobal_archive.py | 2 +- sorc/gfs_utils.fd | 2 +- sorc/link_workflow.sh | 3 - ush/gfs_bufr.sh | 15 +- ush/make_NTC_file.pl | 118 +++++++++ ush/make_ntc_bull.pl | 242 ++++++++++++++++++ ush/make_tif.sh | 31 +++ ush/month_name.sh | 113 ++++++++ versions/run.wcoss2.ver | 1 + workflow/applications/gfs_cycled.py | 9 +- workflow/applications/gfs_forecast_only.py | 4 +- workflow/hosts/hera.yaml | 1 + workflow/hosts/hercules.yaml | 1 + workflow/hosts/orion.yaml | 1 + workflow/rocoto/gfs_tasks.py | 15 +- 40 files changed, 772 insertions(+), 224 deletions(-) create mode 100644 ci/cases/pr/C48_S2SW_extended.yaml create mode 100755 ush/make_NTC_file.pl create mode 100755 ush/make_ntc_bull.pl create mode 100755 ush/make_tif.sh create mode 100755 ush/month_name.sh diff --git a/.gitignore b/.gitignore index f3cb1e1b3e..7669dac845 100644 --- a/.gitignore +++ b/.gitignore @@ -162,10 +162,6 @@ ush/global_cycle_driver.sh ush/jediinc2fv3.py ush/ufsda ush/soca -ush/make_NTC_file.pl -ush/make_ntc_bull.pl -ush/make_tif.sh -ush/month_name.sh ush/imsfv3_scf2ioda.py ush/atparse.bash ush/run_bufr2ioda.py diff --git a/ci/cases/pr/C48_S2SW_extended.yaml b/ci/cases/pr/C48_S2SW_extended.yaml new file mode 100644 index 0000000000..f3c2a567b7 --- /dev/null +++ b/ci/cases/pr/C48_S2SW_extended.yaml @@ -0,0 +1,21 @@ +experiment: + system: gfs + mode: forecast-only + +arguments: + pslot: {{ 'pslot' | getenv }} + app: S2SW + resdetatmos: 48 + resdetocean: 5.0 + comroot: {{ 'RUNTESTS' | getenv }}/COMROOT + expdir: {{ 'RUNTESTS' | getenv }}/EXPDIR + idate: 2021032312 + edate: 2021032312 + yaml: {{ HOMEgfs }}/ci/cases/yamls/gfs_extended_ci.yaml + +skip_ci_on_hosts: + - hera + - gaea + - orion + - hercules + - wcoss2 # TODO run on WCOSS2 once the gfs_waveawipsbulls job is fixed diff --git a/ci/cases/yamls/gfs_extended_ci.yaml b/ci/cases/yamls/gfs_extended_ci.yaml index 2e7a173994..12ccda6bbd 100644 --- a/ci/cases/yamls/gfs_extended_ci.yaml +++ b/ci/cases/yamls/gfs_extended_ci.yaml @@ -6,7 +6,7 @@ base: DO_GOES: "YES" DO_BUFRSND: "YES" DO_GEMPAK: "YES" - DO_AWIPS: "NO" + DO_AWIPS: "YES" DO_NPOESS: "YES" DO_GENESIS_FSU: "NO" FCST_BREAKPOINTS: 192 diff --git a/env/HERA.env b/env/HERA.env index 051287004b..ec49724842 100755 --- a/env/HERA.env +++ b/env/HERA.env @@ -133,11 +133,6 @@ elif [[ "${step}" = "marinebmat" ]]; then export APRUNCFP="${launcher} -n \$ncmd --multi-prog" export APRUN_MARINEBMAT="${APRUN_default}" -elif [[ "${step}" = "marinebmat" ]]; then - - export 
APRUNCFP="${launcher} -n \$ncmd --multi-prog" - export APRUN_MARINEBMAT="${APRUN_default}" - elif [[ "${step}" = "marineanlvar" ]]; then export APRUNCFP="${launcher} -n \$ncmd --multi-prog" diff --git a/gempak/ush/gdas_ecmwf_meta_ver.sh b/gempak/ush/gdas_ecmwf_meta_ver.sh index b038be6c25..66bd761979 100755 --- a/gempak/ush/gdas_ecmwf_meta_ver.sh +++ b/gempak/ush/gdas_ecmwf_meta_ver.sh @@ -24,7 +24,7 @@ fi export COMIN="gdas.${PDY}${cyc}" if [[ ! -L ${COMIN} ]]; then - ${NLN} "${COM_ATMOS_GEMPAK_1p00}" "${COMIN}" + ${NLN} "${COMIN_ATMOS_GEMPAK_1p00}" "${COMIN}" fi vergrid="F-GDAS | ${PDY:2}/0600" fcsthr="0600f006" @@ -152,16 +152,16 @@ if (( err != 0 )) || [[ ! -s ecmwfver.meta ]]; then exit "${err}" fi -mv ecmwfver.meta "${COM_ATMOS_GEMPAK_META}/ecmwfver_${PDY}_${cyc2}" +mv ecmwfver.meta "${COMOUT_ATMOS_GEMPAK_META}/ecmwfver_${PDY}_${cyc2}" export err=$? if (( err != 0 )) ; then - echo "FATAL ERROR: Failed to move meta file to ${COM_ATMOS_GEMPAK_META}/ecmwfver_${PDY}_${cyc2}" + echo "FATAL ERROR: Failed to move meta file to ${COMOUT_ATMOS_GEMPAK_META}/ecmwfver_${PDY}_${cyc2}" exit "${err}" fi if [[ "${SENDDBN}" == "YES" ]] ; then "${DBNROOT}/bin/dbn_alert" MODEL ECMWFVER_HPCMETAFILE "${job}" \ - "${COM_ATMOS_GEMPAK_META}/ecmwfver_${PDY}_${cyc2}" + "${COMOUT_ATMOS_GEMPAK_META}/ecmwfver_${PDY}_${cyc2}" fi exit diff --git a/gempak/ush/gdas_meta_loop.sh b/gempak/ush/gdas_meta_loop.sh index 3191789c9b..8baa4c2899 100755 --- a/gempak/ush/gdas_meta_loop.sh +++ b/gempak/ush/gdas_meta_loop.sh @@ -13,7 +13,7 @@ device="nc | gdasloop.meta" # export COMIN="${RUN}.${PDY}${cyc}" if [[ ! -L "${COMIN}" ]]; then - ${NLN} "${COM_ATMOS_GEMPAK_1p00}" "${COMIN}" + ${NLN} "${COMIN_ATMOS_GEMPAK_1p00}" "${COMIN}" fi if [[ "${envir}" == "para" ]] ; then @@ -38,10 +38,10 @@ for (( fhr=24; fhr<=144; fhr+=24 )); do cycles=$(seq -s ' ' -f "%02g" 0 6 "${cyc}") for cycle in ${cycles}; do # Test with GDAS in PROD - YMD=${day} HH=${cyc} GRID=1p00 declare_from_tmpl "COM_ATMOS_GEMPAK_1p00_past:COM_ATMOS_GEMPAK_TMPL" + YMD=${day} HH=${cyc} GRID=1p00 declare_from_tmpl "COMIN_ATMOS_GEMPAK_1p00_past:COM_ATMOS_GEMPAK_TMPL" export COMIN="${RUN}.${day}${cycle}" if [[ ! -L "${COMIN}" ]]; then - ${NLN} "${COM_ATMOS_GEMPAK_1p00_past}" "${COMIN}" + ${NLN} "${COMIN_ATMOS_GEMPAK_1p00_past}" "${COMIN}" fi gdfile="${COMIN}/gdas_1p00_${day}${cycle}f000" @@ -228,16 +228,16 @@ if (( err != 0 )) || [[ ! -s gdasloop.meta ]]; then exit "${err}" fi -mv gdasloop.meta "${COM_ATMOS_GEMPAK_META}/gdas_${PDY}_${cyc}_loop" +mv gdasloop.meta "${COMOUT_ATMOS_GEMPAK_META}/gdas_${PDY}_${cyc}_loop" export err=$? if (( err != 0 )) ; then - echo "FATAL ERROR: Failed to move meta file to ${COM_ATMOS_GEMPAK_META}/gdas_${PDY}_${cyc}_loop" + echo "FATAL ERROR: Failed to move meta file to ${COMOUT_ATMOS_GEMPAK_META}/gdas_${PDY}_${cyc}_loop" exit "${err}" fi if [[ ${SENDDBN} == "YES" ]] ; then "${DBNROOT}/bin/dbn_alert" MODEL "${DBN_ALERT_TYPE}" "${job}" \ - "${COM_ATMOS_GEMPAK_META}/gdas_${PDY}_${cyc}_loop" + "${COMOUT_ATMOS_GEMPAK_META}/gdas_${PDY}_${cyc}_loop" fi exit diff --git a/gempak/ush/gdas_meta_na.sh b/gempak/ush/gdas_meta_na.sh index 6a7e0a28c3..8873fa5a7c 100755 --- a/gempak/ush/gdas_meta_na.sh +++ b/gempak/ush/gdas_meta_na.sh @@ -13,7 +13,7 @@ device="nc | gdas.meta" # export COMIN="${RUN}.${PDY}${cyc}" if [[ ! -L "${COMIN}" ]]; then - ${NLN} "${COM_ATMOS_GEMPAK_1p00}" "${COMIN}" + ${NLN} "${COMIN_ATMOS_GEMPAK_1p00}" "${COMIN}" fi if [[ "${envir}" == "para" ]] ; then @@ -99,14 +99,14 @@ if (( err != 0 )) || [[ ! 
-s gdas.meta ]] &> /dev/null; then exit "${err}" fi -mv gdas.meta "${COM_ATMOS_GEMPAK_META}/gdas_${PDY}_${cyc}_na" +mv gdas.meta "${COMOUT_ATMOS_GEMPAK_META}/gdas_${PDY}_${cyc}_na" export err=$? if (( err != 0 )) ; then - echo "FATAL ERROR: Failed to move meta file to ${COM_ATMOS_GEMPAK_META}/gdas_${PDY}_${cyc}_na" + echo "FATAL ERROR: Failed to move meta file to ${COMOUT_ATMOS_GEMPAK_META}/gdas_${PDY}_${cyc}_na" exit "${err}" fi if [[ "${SENDDBN}" == "YES" ]] ; then "${DBNROOT}/bin/dbn_alert" MODEL "${DBN_ALERT_TYPE}" "${job}" \ - "${COM_ATMOS_GEMPAK_META}/gdas_${PDY}_${cyc}_na" + "${COMOUT_ATMOS_GEMPAK_META}/gdas_${PDY}_${cyc}_na" fi diff --git a/gempak/ush/gdas_ukmet_meta_ver.sh b/gempak/ush/gdas_ukmet_meta_ver.sh index be3d459e8c..065a3e95af 100755 --- a/gempak/ush/gdas_ukmet_meta_ver.sh +++ b/gempak/ush/gdas_ukmet_meta_ver.sh @@ -23,7 +23,7 @@ cp "${HOMEgfs}/gempak/fix/datatype.tbl" datatype.tbl # SET CURRENT CYCLE AS THE VERIFICATION GRIDDED FILE. export COMIN="gdas.${PDY}${cyc}" if [[ ! -L ${COMIN} ]]; then - ${NLN} "${COM_ATMOS_GEMPAK_1p00}" "${COMIN}" + ${NLN} "${COMINT_ATMOS_GEMPAK_1p00}" "${COMIN}" fi vergrid="F-GDAS | ${PDY:2}/0600" fcsthr="0600f006" @@ -155,16 +155,16 @@ if (( err != 0 )) || [[ ! -s ukmetver_12.meta ]]; then exit "${err}" fi -mv ukmetver_12.meta "${COM_ATMOS_GEMPAK_META}/ukmetver_${PDY}_12" +mv ukmetver_12.meta "${COMOUT_ATMOS_GEMPAK_META}/ukmetver_${PDY}_12" export err=$? if (( err != 0 )) ; then - echo "FATAL ERROR: Failed to move meta file to ${COM_ATMOS_GEMPAK_META}/ukmetver_${PDY}_12" + echo "FATAL ERROR: Failed to move meta file to ${COMOUT_ATMOS_GEMPAK_META}/ukmetver_${PDY}_12" exit "${err}" fi if [[ "${SENDDBN}" == "YES" ]] ; then "${DBNROOT}/bin/dbn_alert" MODEL UKMETVER_HPCMETAFILE "${job}" \ - "${COM_ATMOS_GEMPAK_META}/ukmetver_${PDY}_12" + "${COMOUT_ATMOS_GEMPAK_META}/ukmetver_${PDY}_12" fi exit diff --git a/gempak/ush/gempak_gdas_f000_gif.sh b/gempak/ush/gempak_gdas_f000_gif.sh index 80e28f5345..3e7ebf3c47 100755 --- a/gempak/ush/gempak_gdas_f000_gif.sh +++ b/gempak/ush/gempak_gdas_f000_gif.sh @@ -274,24 +274,24 @@ EOF # Copy the GIF images into my area -cp "${hgttmp850dev}" "${COM_ATMOS_GEMPAK_GIF}/." -cp "${hgttmp700dev}" "${COM_ATMOS_GEMPAK_GIF}/." -cp "${hgttmp500dev}" "${COM_ATMOS_GEMPAK_GIF}/." -cp "${hgtiso300dev}" "${COM_ATMOS_GEMPAK_GIF}/." -cp "${hgtiso250dev}" "${COM_ATMOS_GEMPAK_GIF}/." -cp "${hgtiso200dev}" "${COM_ATMOS_GEMPAK_GIF}/." -cp "${mslpthksfcdev}" "${COM_ATMOS_GEMPAK_GIF}/." +cp "${hgttmp850dev}" "${COMOUT_ATMOS_GEMPAK_GIF}/." +cp "${hgttmp700dev}" "${COMOUT_ATMOS_GEMPAK_GIF}/." +cp "${hgttmp500dev}" "${COMOUT_ATMOS_GEMPAK_GIF}/." +cp "${hgtiso300dev}" "${COMOUT_ATMOS_GEMPAK_GIF}/." +cp "${hgtiso250dev}" "${COMOUT_ATMOS_GEMPAK_GIF}/." +cp "${hgtiso200dev}" "${COMOUT_ATMOS_GEMPAK_GIF}/." +cp "${mslpthksfcdev}" "${COMOUT_ATMOS_GEMPAK_GIF}/." 
# Send the GIF images onto the NCDC area on the public ftp server if [[ ${SENDDBN} == YES ]]; then - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgttmp850dev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgttmp700dev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgttmp500dev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgtiso300dev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgtiso250dev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgtiso200dev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${mslpthksfcdev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgttmp850dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgttmp700dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgttmp500dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgtiso300dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgtiso250dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgtiso200dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${mslpthksfcdev}" fi @@ -443,18 +443,18 @@ EOF # Copy the GIF images into my area -cp "${mslpthksfcdev}" "${COM_ATMOS_GEMPAK_GIF}/." -cp "${hgttmp500dev}" "${COM_ATMOS_GEMPAK_GIF}/." -cp "${hgtiso300dev}" "${COM_ATMOS_GEMPAK_GIF}/." -cp "${hgtiso250dev}" "${COM_ATMOS_GEMPAK_GIF}/." +cp "${mslpthksfcdev}" "${COMOUT_ATMOS_GEMPAK_GIF}/." +cp "${hgttmp500dev}" "${COMOUT_ATMOS_GEMPAK_GIF}/." +cp "${hgtiso300dev}" "${COMOUT_ATMOS_GEMPAK_GIF}/." +cp "${hgtiso250dev}" "${COMOUT_ATMOS_GEMPAK_GIF}/." 
# Copy the GIF images onto the NCDC area on the public ftp server if [[ ${SENDDBN} == YES ]]; then - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${mslpthksfcdev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgttmp500dev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgtiso300dev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgtiso250dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${mslpthksfcdev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgttmp500dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgtiso300dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgtiso250dev}" fi exit diff --git a/gempak/ush/gempak_gfs_f000_gif.sh b/gempak/ush/gempak_gfs_f000_gif.sh index 6a709fcc16..4393c01a7e 100755 --- a/gempak/ush/gempak_gfs_f000_gif.sh +++ b/gempak/ush/gempak_gfs_f000_gif.sh @@ -15,6 +15,8 @@ # ######################################################################### +source "${HOMEgfs}/ush/preamble.sh" + LATVAL="1/1/1/1/5;5" pixels="1728;1472" cp "${HOMEgfs}/gempak/fix/coltbl.spc" coltbl.xwp @@ -533,41 +535,41 @@ EOF # Copy the GIF images into my area -cp "${hgttmp700dev}" "${COM_ATMOS_GEMPAK_GIF}" -cp "${hgttmp500dev}" "${COM_ATMOS_GEMPAK_GIF}" -cp "${hgtiso300dev}" "${COM_ATMOS_GEMPAK_GIF}" -cp "${hgtiso250dev}" "${COM_ATMOS_GEMPAK_GIF}" -cp "${hgttmp250dev}" "${COM_ATMOS_GEMPAK_GIF}" -cp "${hgtiso200dev}" "${COM_ATMOS_GEMPAK_GIF}" -cp "${hgtiso100dev}" "${COM_ATMOS_GEMPAK_GIF}" -cp "${hgttmp100dev}" "${COM_ATMOS_GEMPAK_GIF}" -cp "${mslpthksfcdev}" "${COM_ATMOS_GEMPAK_GIF}" -cp "${mslpthksfcusdev}" "${COM_ATMOS_GEMPAK_GIF}" -cp "${hgtvor500dev}" "${COM_ATMOS_GEMPAK_GIF}" -cp "${hgtvor500usdev}" "${COM_ATMOS_GEMPAK_GIF}" -cp "${liftdev}" "${COM_ATMOS_GEMPAK_GIF}" -cp "${prswshtropdev}" "${COM_ATMOS_GEMPAK_GIF}" -cp "${rhvvel700dev}" "${COM_ATMOS_GEMPAK_GIF}" +cp "${hgttmp700dev}" "${COMOUT_ATMOS_GEMPAK_GIF}" +cp "${hgttmp500dev}" "${COMOUT_ATMOS_GEMPAK_GIF}" +cp "${hgtiso300dev}" "${COMOUT_ATMOS_GEMPAK_GIF}" +cp "${hgtiso250dev}" "${COMOUT_ATMOS_GEMPAK_GIF}" +cp "${hgttmp250dev}" "${COMOUT_ATMOS_GEMPAK_GIF}" +cp "${hgtiso200dev}" "${COMOUT_ATMOS_GEMPAK_GIF}" +cp "${hgtiso100dev}" "${COMOUT_ATMOS_GEMPAK_GIF}" +cp "${hgttmp100dev}" "${COMOUT_ATMOS_GEMPAK_GIF}" +cp "${mslpthksfcdev}" "${COMOUT_ATMOS_GEMPAK_GIF}" +cp "${mslpthksfcusdev}" "${COMOUT_ATMOS_GEMPAK_GIF}" +cp "${hgtvor500dev}" "${COMOUT_ATMOS_GEMPAK_GIF}" +cp "${hgtvor500usdev}" "${COMOUT_ATMOS_GEMPAK_GIF}" +cp "${liftdev}" "${COMOUT_ATMOS_GEMPAK_GIF}" +cp "${prswshtropdev}" "${COMOUT_ATMOS_GEMPAK_GIF}" +cp "${rhvvel700dev}" "${COMOUT_ATMOS_GEMPAK_GIF}" # Copy the GIF images onto the NCDC area on the public ftp server if [[ "${SENDDBN}" == "YES" ]]; then - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgttmp700dev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgttmp500dev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgtiso300dev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgtiso250dev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgttmp250dev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgtiso200dev}" -# "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF 
"${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgttmp200dev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgtiso100dev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgttmp100dev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${mslpthksfcdev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${mslpthksfcusdev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgtvor500dev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgtvor500usdev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${liftdev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${prswshtropdev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${rhvvel700dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgttmp700dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgttmp500dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgtiso300dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgtiso250dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgttmp250dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgtiso200dev}" +# "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgttmp200dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgtiso100dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgttmp100dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${mslpthksfcdev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${mslpthksfcusdev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgtvor500dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgtvor500usdev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${liftdev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${prswshtropdev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${rhvvel700dev}" @@ -576,7 +578,7 @@ fi # Convert the 500mb NH Hgts/Temps chart to tif, attach a heading and # send to TOC via the NTC -export input=${COM_ATMOS_GEMPAK_GIF}/${hgttmp500dev} +export input=${COMOUT_ATMOS_GEMPAK_GIF}/${hgttmp500dev} export HEADER=YES export OUTPATH=${DATA}/gfs_500_hgt_tmp_nh_anl_${cyc}.tif "${USHgfs}/make_tif.sh" diff --git a/gempak/ush/gempak_gfs_fhhh_gif.sh b/gempak/ush/gempak_gfs_fhhh_gif.sh index 33f5764068..2a89219ecc 100755 --- a/gempak/ush/gempak_gfs_fhhh_gif.sh +++ b/gempak/ush/gempak_gfs_fhhh_gif.sh @@ -7,6 +7,8 @@ # ######################################################################### +source "${HOMEgfs}/ush/preamble.sh" + LATVAL="1/1/1/1/5;5" pixels="1728;1472" cp "${HOMEgfs}/gempak/fix/coltbl.spc" coltbl.xwp @@ -170,18 +172,18 @@ EOF # Copy the GIF images into my area -cp "${mslpthksfcdev}" "${COM_ATMOS_GEMPAK_GIF}" -cp "${hgtvor500dev}" "${COM_ATMOS_GEMPAK_GIF}" -cp "${hgtvor500usdev}" "${COM_ATMOS_GEMPAK_GIF}" -cp "${rhvvel700dev}" "${COM_ATMOS_GEMPAK_GIF}" +cp "${mslpthksfcdev}" "${COMOUT_ATMOS_GEMPAK_GIF}" +cp "${hgtvor500dev}" 
"${COMOUT_ATMOS_GEMPAK_GIF}" +cp "${hgtvor500usdev}" "${COMOUT_ATMOS_GEMPAK_GIF}" +cp "${rhvvel700dev}" "${COMOUT_ATMOS_GEMPAK_GIF}" # Copy the GIF images onto the NCDC area on the public ftp server if [[ "${SENDDBN}" == YES ]]; then - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${mslpthksfcdev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgtvor500dev}" - # "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${hgtvor500usdev}" - "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COM_ATMOS_GEMPAK_GIF}/${rhvvel700dev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${mslpthksfcdev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgtvor500dev}" + # "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${hgtvor500usdev}" + "${DBNROOT}/bin/dbn_alert" MODEL NCDCGIF "${job}" "${COMOUT_ATMOS_GEMPAK_GIF}/${rhvvel700dev}" fi echo "GEMPAK_GIF ${fhr3} hour completed normally" diff --git a/jobs/JGDAS_ATMOS_GEMPAK_META_NCDC b/jobs/JGDAS_ATMOS_GEMPAK_META_NCDC index dca629c845..1a31e077f9 100755 --- a/jobs/JGDAS_ATMOS_GEMPAK_META_NCDC +++ b/jobs/JGDAS_ATMOS_GEMPAK_META_NCDC @@ -34,17 +34,17 @@ export DBNROOT=${DBNROOT:-${UTILROOT}/fakedbn} ############################################## # Define COM directories ############################################## -GRID=1p00 YMD=${PDY} HH=${cyc} declare_from_tmpl -rx "COM_ATMOS_GEMPAK_1p00:COM_ATMOS_GEMPAK_TMPL" +GRID=1p00 YMD=${PDY} HH=${cyc} declare_from_tmpl -rx "COMIN_ATMOS_GEMPAK_1p00:COM_ATMOS_GEMPAK_TMPL" -GRID="meta" YMD=${PDY} HH=${cyc} declare_from_tmpl -rx "COM_ATMOS_GEMPAK_META:COM_ATMOS_GEMPAK_TMPL" -if [[ ! -d "${COM_ATMOS_GEMPAK_META}" ]]; then - mkdir -m 775 -p "${COM_ATMOS_GEMPAK_META}" +GRID="meta" YMD=${PDY} HH=${cyc} declare_from_tmpl -rx "COMOUT_ATMOS_GEMPAK_META:COM_ATMOS_GEMPAK_TMPL" +if [[ ! -d "${COMOUT_ATMOS_GEMPAK_META}" ]]; then + mkdir -m 775 -p "${COMOUT_ATMOS_GEMPAK_META}" fi if (( cyc%12 == 0 )); then - GRID="gif" YMD=${PDY} HH=${cyc} declare_from_tmpl -rx "COM_ATMOS_GEMPAK_GIF:COM_ATMOS_GEMPAK_TMPL" - if [[ ! -d "${COM_ATMOS_GEMPAK_GIF}" ]]; then - mkdir -m 775 -p "${COM_ATMOS_GEMPAK_GIF}" + GRID="gif" YMD=${PDY} HH=${cyc} declare_from_tmpl -rx "COMOUT_ATMOS_GEMPAK_GIF:COM_ATMOS_GEMPAK_TMPL" + if [[ ! -d "${COMOUT_ATMOS_GEMPAK_GIF}" ]]; then + mkdir -m 775 -p "${COMOUT_ATMOS_GEMPAK_GIF}" fi fi diff --git a/jobs/JGFS_ATMOS_GEMPAK_NCDC_UPAPGIF b/jobs/JGFS_ATMOS_GEMPAK_NCDC_UPAPGIF index d62c3320a1..b9559ed7ac 100755 --- a/jobs/JGFS_ATMOS_GEMPAK_NCDC_UPAPGIF +++ b/jobs/JGFS_ATMOS_GEMPAK_NCDC_UPAPGIF @@ -29,17 +29,19 @@ export COMPONENT="atmos" ############################################## # Define COM directories ############################################## -YMD=${PDY} HH=${cyc} declare_from_tmpl -rx "COM_OBS" -GRID=1p00 YMD=${PDY} HH=${cyc} declare_from_tmpl -rx "COM_ATMOS_GEMPAK_1p00:COM_ATMOS_GEMPAK_TMPL" +YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COMIN_OBS:COM_OBS_TMPL +GRID=1p00 YMD=${PDY} HH=${cyc} declare_from_tmpl -rx "COMIN_ATMOS_GEMPAK_1p00:COM_ATMOS_GEMPAK_TMPL" +# Declare COMOUT_ATMOS_GEMPAK_GIF and COMOUT_ATMOS_GEMPAK_UPPER_AIR for grid in gif upper_air; do - gempak_dir="COM_ATMOS_GEMPAK_${grid^^}" + _GRID="${grid^^}" + gempak_dir="COMOUT_ATMOS_GEMPAK_${_GRID}" GRID=${grid} YMD=${PDY} HH=${cyc} declare_from_tmpl -rx "${gempak_dir}:COM_ATMOS_GEMPAK_TMPL" if [[ ! 
-d "${!gempak_dir}" ]]; then mkdir -m 775 -p "${!gempak_dir}"; fi done -YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_ATMOS_WMO -if [[ ! -d "${COM_ATMOS_WMO}" ]]; then mkdir -m 775 -p "${COM_ATMOS_WMO}"; fi +YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COMOUT_ATMOS_WMO:COM_ATMOS_WMO_TMPL +if [[ ! -d "${COMOUT_ATMOS_WMO}" ]]; then mkdir -m 775 -p "${COMOUT_ATMOS_WMO}"; fi export SENDDBN=${SENDDBN:-NO} export DBNROOT=${DBNROOT:-${UTILROOT}/fakedbn} @@ -65,4 +67,3 @@ fi if [[ "${KEEPDATA}" != "YES" ]] ; then rm -rf "${DATA}" fi - diff --git a/jobs/JGLOBAL_WAVE_GEMPAK b/jobs/JGLOBAL_WAVE_GEMPAK index 9822e4d416..c7b615c560 100755 --- a/jobs/JGLOBAL_WAVE_GEMPAK +++ b/jobs/JGLOBAL_WAVE_GEMPAK @@ -13,9 +13,11 @@ export DBN_ALERT_TYPE=GFS_WAVE_GEMPAK export SENDDBN=${SENDDBN:-YES} export DBNROOT=${DBNROOT:-${UTILROOT}/fakedbn} -YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_WAVE_GRID COM_WAVE_GEMPAK +YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \ + COMIN_WAVE_GRID:COM_WAVE_GRID_TMPL \ + COMOUT_WAVE_GEMPAK:COM_WAVE_GEMPAK_TMPL -if [[ ! -d ${COM_WAVE_GEMPAK} ]]; then mkdir -p "${COM_WAVE_GEMPAK}"; fi +if [[ ! -d ${COMOUT_WAVE_GEMPAK} ]]; then mkdir -p "${COMOUT_WAVE_GEMPAK}"; fi ######################################################## # Execute the script. diff --git a/modulefiles/module_base.wcoss2.lua b/modulefiles/module_base.wcoss2.lua index 830ea78b05..ad4f45f7d5 100644 --- a/modulefiles/module_base.wcoss2.lua +++ b/modulefiles/module_base.wcoss2.lua @@ -17,6 +17,7 @@ load(pathJoin("libjpeg", (os.getenv("libjpeg_ver") or "None"))) load(pathJoin("udunits", (os.getenv("udunits_ver") or "None"))) load(pathJoin("gsl", (os.getenv("gsl_ver") or "None"))) load(pathJoin("cdo", (os.getenv("cdo_ver") or "None"))) +load(pathJoin("imagemagick", (os.getenv("imagemagick_ver") or "None"))) load(pathJoin("hdf5", (os.getenv("hdf5_ver") or "None"))) load(pathJoin("netcdf", (os.getenv("netcdf_ver") or "None"))) diff --git a/parm/archive/gfs_downstream.yaml.j2 b/parm/archive/gfs_downstream.yaml.j2 index 94bdd1df56..43aa4df4ba 100644 --- a/parm/archive/gfs_downstream.yaml.j2 +++ b/parm/archive/gfs_downstream.yaml.j2 @@ -3,8 +3,10 @@ gfs_downstream: name: "GFS_DOWNSTREAM" target: "{{ ATARDIR }}/{{ cycle_YMDH }}/gfs_downstream.tar" required: + {% if DO_GEMPAK %} - "{{ COMIN_ATMOS_GEMPAK | relpath(ROTDIR) }}/gfs_{{ cycle_YMDH }}.sfc" - "{{ COMIN_ATMOS_GEMPAK | relpath(ROTDIR) }}/gfs_{{ cycle_YMDH }}.snd" + {% endif %} {% for i in range(1, NUM_SND_COLLECTIVES) %} - "{{ COMIN_ATMOS_BUFR | relpath(ROTDIR) }}/gfs_collective{{ i }}.fil" {% endfor %} diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources index 5acc7e5620..6a85a6de14 100644 --- a/parm/config/gfs/config.resources +++ b/parm/config/gfs/config.resources @@ -1293,6 +1293,7 @@ case ${step} in ntasks=1 threads_per_task=1 memory="4GB" + tasks_per_node=1 ;; "mos_stn_prep") diff --git a/parm/config/gfs/config.waveawipsbulls b/parm/config/gfs/config.waveawipsbulls index 65a8d5076b..e8f27dd881 100644 --- a/parm/config/gfs/config.waveawipsbulls +++ b/parm/config/gfs/config.waveawipsbulls @@ -8,6 +8,4 @@ echo "BEGIN: config.waveawipsbulls" # Get task specific resources . 
$EXPDIR/config.resources waveawipsbulls -export DBNROOT=/dev/null - echo "END: config.waveawipsbulls" diff --git a/parm/config/gfs/config.waveawipsgridded b/parm/config/gfs/config.waveawipsgridded index bd7c7c11e4..48cbfda6a7 100644 --- a/parm/config/gfs/config.waveawipsgridded +++ b/parm/config/gfs/config.waveawipsgridded @@ -8,6 +8,12 @@ echo "BEGIN: config.waveawipsgridded" # Get task specific resources . $EXPDIR/config.resources waveawipsgridded -export DBNROOT=/dev/null +# AWIPS output frequency +export FHMAX_HF_WAV_WMO=72 +export FHMAX_WAV_WMO=180 +export FHOUT_HF_WAV_WMO=3 +export FHOUT_WAV_WMO=6 +export FHMAX_HF_WAV_WMO=$(( FHMAX_HF_WAV_WMO > FHMAX_WAV ? FHMAX_WAV : FHMAX_HF_WAV_WMO )) +export FHMAX_WAV_WMO=$(( FHMAX_WAV_WMO > FHMAX_WAV ? FHMAX_WAV : FHMAX_WAV_WMO )) echo "END: config.waveawipsgridded" diff --git a/scripts/exgdas_atmos_gempak_gif_ncdc.sh b/scripts/exgdas_atmos_gempak_gif_ncdc.sh index 2dc460cc55..f932fb570c 100755 --- a/scripts/exgdas_atmos_gempak_gif_ncdc.sh +++ b/scripts/exgdas_atmos_gempak_gif_ncdc.sh @@ -19,7 +19,7 @@ if [[ ${MODEL} == GDAS ]]; then max_tries=180 export fhr3 for fhr3 in ${fcsthrs}; do - gempak_file="${COM_ATMOS_GEMPAK_1p00}/${RUN}_1p00_${PDY}${cyc}f${fhr3}" + gempak_file="${COMIN_ATMOS_GEMPAK_1p00}/${RUN}_1p00_${PDY}${cyc}f${fhr3}" if ! wait_for_file "${gempak_file}" "${sleep_interval}" "${max_tries}" ; then echo "FATAL ERROR: ${gempak_file} not found after ${max_tries} iterations" exit 10 diff --git a/scripts/exgfs_atmos_gempak_gif_ncdc_skew_t.sh b/scripts/exgfs_atmos_gempak_gif_ncdc_skew_t.sh index f7e981c6b6..e8a7c305d5 100755 --- a/scripts/exgfs_atmos_gempak_gif_ncdc_skew_t.sh +++ b/scripts/exgfs_atmos_gempak_gif_ncdc_skew_t.sh @@ -27,7 +27,7 @@ if [[ "${MODEL}" == GDAS ]] || [[ "${MODEL}" == GFS ]]; then max_tries=180 for fhr in ${fcsthrs}; do fhr3=$(printf %03d "${fhr}") - export GRIBFILE=${COM_ATMOS_GEMPAK_1p00}/${RUN}_1p00_${PDY}${cyc}f${fhr3} + export GRIBFILE=${COMIN_ATMOS_GEMPAK_1p00}/${RUN}_1p00_${PDY}${cyc}f${fhr3} if ! wait_for_file "${GRIBFILE}" "${sleep_interval}" "${max_tries}" ; then echo "FATAL ERROR: ${GRIBFILE} not found after ${max_tries} iterations" exit 10 @@ -50,7 +50,7 @@ export RSHPDY="${PDY:4:}${PDY:2:2}" cp "${HOMEgfs}/gempak/dictionaries/sonde.land.tbl" sonde.land.tbl cp "${HOMEgfs}/gempak/dictionaries/metar.tbl" metar.tbl sort -k 2n,2 metar.tbl > metar_stnm.tbl -cp "${COM_OBS}/${model}.${cycle}.adpupa.tm00.bufr_d" fort.40 +cp "${COMIN_OBS}/${model}.${cycle}.adpupa.tm00.bufr_d" fort.40 err=$? if (( err != 0 )) ; then echo "FATAL ERROR: File ${model}.${cycle}.adpupa.tm00.bufr_d could not be copied (does it exist?)." 
@@ -68,11 +68,11 @@ export filesize=$( ls -l rdbfmsua.out | awk '{print $5}' ) ################################################################ if (( filesize > 40 )); then - cp rdbfmsua.out "${COM_ATMOS_GEMPAK_UPPER_AIR}/${RUN}.${cycle}.msupperair" - cp sonde.idsms.tbl "${COM_ATMOS_GEMPAK_UPPER_AIR}/${RUN}.${cycle}.msupperairtble" + cp rdbfmsua.out "${COMOUT_ATMOS_GEMPAK_UPPER_AIR}/${RUN}.${cycle}.msupperair" + cp sonde.idsms.tbl "${COMOUT_ATMOS_GEMPAK_UPPER_AIR}/${RUN}.${cycle}.msupperairtble" if [[ ${SENDDBN} = "YES" ]]; then - "${DBNROOT}/bin/dbn_alert" DATA MSUPPER_AIR "${job}" "${COM_ATMOS_GEMPAK_UPPER_AIR}/${RUN}.${cycle}.msupperair" - "${DBNROOT}/bin/dbn_alert" DATA MSUPPER_AIRTBL "${job}" "${COM_ATMOS_GEMPAK_UPPER_AIR}/${RUN}.${cycle}.msupperairtble" + "${DBNROOT}/bin/dbn_alert" DATA MSUPPER_AIR "${job}" "${COMOUT_ATMOS_GEMPAK_UPPER_AIR}/${RUN}.${cycle}.msupperair" + "${DBNROOT}/bin/dbn_alert" DATA MSUPPER_AIRTBL "${job}" "${COMOUT_ATMOS_GEMPAK_UPPER_AIR}/${RUN}.${cycle}.msupperairtble" fi fi diff --git a/scripts/exgfs_atmos_postsnd.sh b/scripts/exgfs_atmos_postsnd.sh index 8f2aa43568..61761b936d 100755 --- a/scripts/exgfs_atmos_postsnd.sh +++ b/scripts/exgfs_atmos_postsnd.sh @@ -117,7 +117,7 @@ for fhr in "${hour_list[@]}"; do echo "File ${filename} is required but not found." err_exit "FATAL ERROR: logf${fhr} not found." else - echo "${runscript} \"${fhr}\" \"${fhr_p}\" \"${FINT}\" \"${F00FLAG}\" \"${DATA}/${fhr}\"" >> "${DATA}/poescript_bufr" + echo "${runscript} ${fhr} ${fhr_p} ${FINT} ${F00FLAG} ${DATA}/${fhr}" >> "${DATA}/poescript_bufr" fi done diff --git a/scripts/exgfs_wave_nawips.sh b/scripts/exgfs_wave_nawips.sh index 949425cbc1..68ebfc90c7 100755 --- a/scripts/exgfs_wave_nawips.sh +++ b/scripts/exgfs_wave_nawips.sh @@ -14,7 +14,7 @@ source "${USHgfs}/preamble.sh" #export grids=${grids:-'glo_30m at_10m ep_10m wc_10m ao_9km'} #Interpolated grids -export grids=${grids:-'glo_30m'} #Native grids +export grids=${grids:-${waveinterpGRD:-'glo_30m'}} #Native grids export RUNwave=${RUNwave:-${RUN}wave} export fstart=${fstart:-0} export FHMAX_WAV=${FHMAX_WAV:-180} #180 Total of hours to process @@ -68,13 +68,15 @@ while [ ${fhcnt} -le ${FHMAX_WAV} ]; do gso_15m) grdIDin='gsouth.0p25' #grdIDout='gfswaves25k' ;; grdIDout='gfswavesh' ;; - *) gridIDin= + glo_200) grdIDin='global.2p00' + grdIDout='gfswaves200k' ;; + *) grdIDin= grdIDout= ;; esac - GRIBIN="${COM_WAVE_GRID}/${RUNwave}.${cycle}.${grdIDin}.f${fhr}.grib2" + GRIBIN="${COMIN_WAVE_GRID}/${RUNwave}.${cycle}.${grdIDin}.f${fhr}.grib2" GRIBIN_chk=${GRIBIN}.idx if ! wait_for_file "${GRIBIN_chk}" "${sleep_interval}" "${maxtries}"; then - echo "FATAL ERROR: ${GRIBIN_chk} not found after waiting $((sleep_interval * ( max_tries - 1))) secs" + echo "FATAL ERROR: ${GRIBIN_chk} not found after waiting $((sleep_interval * ( maxtries - 1))) secs" echo "${RUNwave} ${grdID} ${fhr} prdgen ${date} ${cycle} : GRIB file missing." 
>> "${wavelog}" err=1;export err;"${errchk}" || exit "${err}" fi @@ -140,11 +142,11 @@ while [ ${fhcnt} -le ${FHMAX_WAV} ]; do gpend fi - cpfs "${GEMGRD}" "${COM_WAVE_GEMPAK}/${GEMGRD}" + cpfs "${GEMGRD}" "${COMOUT_WAVE_GEMPAK}/${GEMGRD}" if [ ${SENDDBN} = "YES" ] ; then - "${DBNROOT}/bin/dbn_alert" MODEL "${DBN_ALERT_TYPE}" "${job}" "${COM_WAVE_GEMPAK}/${GEMGRD}" + "${DBNROOT}/bin/dbn_alert" MODEL "${DBN_ALERT_TYPE}" "${job}" "${COMOUT_WAVE_GEMPAK}/${GEMGRD}" else - echo "##### DBN_ALERT is: MODEL ${DBN_ALERT_TYPE} ${job} ${COM_WAVE_GEMPAK}/${GEMGRD}#####" + echo "##### DBN_ALERT is: MODEL ${DBN_ALERT_TYPE} ${job} ${COMOUT_WAVE_GEMPAK}/${GEMGRD}#####" fi rm grib_${grid} done diff --git a/scripts/exgfs_wave_prdgen_bulls.sh b/scripts/exgfs_wave_prdgen_bulls.sh index 5f5b2c531e..714b9a82b2 100755 --- a/scripts/exgfs_wave_prdgen_bulls.sh +++ b/scripts/exgfs_wave_prdgen_bulls.sh @@ -150,18 +150,18 @@ source "${USHgfs}/preamble.sh" echo ' Looping over buoys ... \n' for bull in $bulls; do - fname="${RUNwave}.$bull.cbull" + fname="${RUNwave}.${bull}.cbull" oname="awipsbull.$bull.$cycle.${RUNwave}" headr=$(grep "b${bull}=" awipsbull.data | sed 's/=/ /g' | awk '{ print $3}') echo " Processing $bull ($headr $oname) ..." - if [ -z "$headr" ] || [ ! -s $fname ]; then + if [[ -z "${headr}" ]] || [[ ! -s "${fname}" ]]; then set_trace - msg="ABNORMAL EXIT: MISSING BULLETING INFO" + msg="ABNORMAL EXIT: MISSING BULLETIN INFO" set +x echo ' ' echo '******************************************** ' - echo '*** FATAL ERROR : MISSING BULLETING INFO *** ' + echo '*** FATAL ERROR : MISSING BULLETIN INFO *** ' echo '******************************************** ' echo ' ' echo $msg @@ -177,7 +177,7 @@ source "${USHgfs}/preamble.sh" -p "${COMOUT_WAVE_WMO}" -s "NO" -o "${oname}" > formbul.out 2>&1 OK=$? - if [ "$OK" != '0' ] || [ ! -f $oname ]; then + if [[ ${OK} -ne 0 ]] || [[ ! -f "${oname}" ]]; then set_trace cat formbul.out msg="ABNORMAL EXIT: ERROR IN formbul" @@ -194,7 +194,7 @@ source "${USHgfs}/preamble.sh" exit $err fi - cat $oname >> awipsbull.$cycle.${RUNwave} + cat "${oname}" >> "awipsbull.$cycle.${RUNwave}" done diff --git a/scripts/exgfs_wave_prdgen_gridded.sh b/scripts/exgfs_wave_prdgen_gridded.sh index 9111c81273..8fd6b5cc76 100755 --- a/scripts/exgfs_wave_prdgen_gridded.sh +++ b/scripts/exgfs_wave_prdgen_gridded.sh @@ -9,7 +9,7 @@ # - Supplemental error output is witten to the wave.log file. 
# # # # COM inputs: # -# - ${COMIN_WAVE_GRID}/${RUNwave}.${cycle}.${grdID}.f${fhr}.grib2 # +# - ${COMIN_WAVE_GRID}/${RUNwave}.${cycle}.${grdIDin}.f${fhr}.grib2 # # # # COM outputs: # # - ${COMOUT_WAVE_WMO}/grib2.${cycle}.f${fhr}.awipsww3_${grdOut} # @@ -31,38 +31,40 @@ source "${USHgfs}/preamble.sh" export RUNwave=${RUNwave:-${RUN}wave} export envir=${envir:-ops} export fstart=${fstart:-0} - export FHMAX_WAV=${FHMAX_WAV:-180} #180 Total of hours to process - export FHMAX_HF_WAV=${FHMAX_HF_WAV:-72} #from 00 to 72 inc=3 - export FHOUT_WAV=${FHOUT_WAV:-6} #from 72 to 180 inc=6 - export FHOUT_HF_WAV=${FHOUT_HF_WAV:-3} + export FHMAX_WAV=${FHMAX_WAV_WMO:-180} #180 Total of hours to process + export FHMAX_HF_WAV=${FHMAX_HF_WAV_WMO:-72} #from 00 to 72 inc=3 + export FHOUT_WAV=${FHOUT_WAV_WMO:-6} #from 72 to 180 inc=6 + export FHOUT_HF_WAV=${FHOUT_HF_WAV_WMO:-3} export maxtries=720 export cyc=${cyc:-00} export cycle=${cycle:-t${cyc}z} export pgmout=OUTPUT.$$ export DATA=${DATA:-${DATAROOT:?}/${job}.$$} - mkdir -p $DATA - cd $DATA + mkdir -p "${DATA}" + cd "${DATA}" || exit 1 export wavelog=${DATA}/${RUNwave}_prdggridded.log echo "Starting MWW3 GRIDDED PRODUCTS SCRIPT" +# Input grid +grid_in="${waveinterpGRD:-glo_15mxt}" # Output grids - # grids=${grids:-ao_9km at_10m ep_10m wc_10m glo_30m} -grids=${grids:-ak_10m at_10m ep_10m wc_10m glo_30m} +grids=${grids:-ao_9km at_10m ep_10m wc_10m glo_30m} # export grids=${wavepostGRD} maxtries=${maxtries:-720} # 0.b Date and time stuff - export date=$PDY + start_time=$(date) + export date=${PDY} export YMDH=${PDY}${cyc} echo ' ' echo ' ****************************' echo ' *** MWW3 PRODUCTS SCRIPT ***' echo ' ****************************' - echo " $date $cycle" + echo " ${date} ${cycle}" echo ' ' - echo "Starting at : $(date)" + echo "Starting at : ${start_time}" echo ' ' echo " AWIPS grib fields" - echo " Wave Grids : $grids" + echo " Wave Grids : ${grids}" echo ' ' set_trace @@ -79,16 +81,28 @@ grids=${grids:-ak_10m at_10m ep_10m wc_10m glo_30m} ASWDIR=(SWDIR1 SWDIR2) # Indices of DIRECTIONS from partitions # (should be same as ASWELL) #export arrpar=(WIND UGRD VGRD HTSGW PERPW DIRPW WVHGT WVPER WVDIR WDIR ${ASWELL[@]} ${ASWDIR[@]} ${ASWPER[@]}) - export arrpar=(WIND WDIR UGRD VGRD HTSGW PERPW DIRPW WVHGT ${ASWELL[@]} WVPER ${ASWPER[@]} WVDIR ${ASWDIR[@]} ) - export nparam=$(echo ${arrpar[@]} | wc -w) + export arrpar=(WIND WDIR UGRD VGRD HTSGW PERPW DIRPW WVHGT "${ASWELL[@]}" WVPER "${ASWPER[@]}" WVDIR "${ASWDIR[@]}" ) + export nparam=$(echo "${arrpar[@]}" | wc -w) # 1.a Grib file (AWIPS and FAX charts) - fhcnt=$fstart - while [ $fhcnt -le $FHMAX_WAV ]; do - fhr=$(printf "%03d" $fhcnt) - for grdOut in $grids;do - case $grdOut in + # Get input grid + # TODO flesh this out with additional input grids if needed + case ${grid_in} in + glo_200) + grdIDin='global.2p00' ;; + glo_15mxt) + grdIDin='global.0p25' ;; + *) + echo "FATAL ERROR Unrecognized input grid ${grid_in}" + exit 2;; + esac + + fhcnt=${fstart} + while [[ "${fhcnt}" -le "${FHMAX_WAV}" ]]; do + fhr=$(printf "%03d" "${fhcnt}") + for grdOut in ${grids}; do + case ${grdOut} in ao_9km) grdID='arctic.9km' ;; at_10m) grdID='atlocn.0p16' ;; ep_10m) grdID='epacif.0p16' ;; @@ -100,56 +114,58 @@ grids=${grids:-ak_10m at_10m ep_10m wc_10m glo_30m} esac # - GRIBIN="${COM_WAVE_GRID}/${RUNwave}.${cycle}.${grdID}.f${fhr}.grib2" + GRIBIN="${COMIN_WAVE_GRID}/${RUNwave}.${cycle}.${grdIDin}.f${fhr}.grib2" GRIBIN_chk="${GRIBIN}.idx" sleep_interval=5 max_tries=1000 if ! 
wait_for_file "${GRIBIN_chk}" "${sleep_interval}" "${max_tries}"; then echo "FATAL ERROR: ${GRIBIN_chk} not found after waiting $((sleep_interval * ( max_tries - 1))) secs" - echo "$RUNwave $grdID ${fhr} prdgen $date $cycle : GRIB file missing." >> $wavelog - err=1;export err;${errchk} || exit ${err} + echo "${RUNwave} ${grdIDin} ${fhr} prdgen ${date} ${cycle} : GRIB file missing." >> "${wavelog}" + err=1;export err;${errchk} || exit "${err}" fi - GRIBOUT=$RUNwave.$cycle.$grdID.f${fhr}.clipped.grib2 + GRIBOUT="${RUNwave}.${cycle}.${grdID}.f${fhr}.clipped.grib2" iparam=1 - while [ ${iparam} -le ${nparam} ]; do - nip=${arrpar[$iparam-1]} - prepar=$(echo $nip | rev | cut -c2- | rev) #Part prefix (assumes 1 digit index) - paridx=$(echo $nip | rev | cut -c-1) + while [[ "${iparam}" -le "${nparam}" ]]; do + nip=${arrpar[${iparam}-1]} + prepar=${nip::-1} # Part prefix (assumes 1 digit index) + paridx="${nip:0-1}" npart=0 - case $prepar in + case ${prepar} in SWELL) npart=1 ;; SWDIR) npart=1 ;; SWPER) npart=1 ;; *) npart=0 ;; esac - echo $nip $prepar $paridx $npart - rm temp.grib2 - if [ "${npart}" = "0" ]; then - $WGRIB2 $GRIBIN -s | grep ":${nip}" | $WGRIB2 -i $GRIBIN -grib temp.grib2 > wgrib.out 2>&1 - $WGRIB2 temp.grib2 -append -grib $GRIBOUT + echo "${nip} ${prepar} ${paridx} ${npart}" + rm -f temp.grib2 + if [[ "${npart}" -eq 0 ]]; then + #shellcheck disable=SC2312 + ${WGRIB2} "${GRIBIN}" -s | grep ":${nip}" | "${WGRIB2}" -i "${GRIBIN}" -grib temp.grib2 > wgrib.out 2>&1 + #shellcheck disable=SC2312 + ${WGRIB2} temp.grib2 -append -grib "${GRIBOUT}" else - $WGRIB2 $GRIBIN -s | grep ":${prepar}" | grep "${paridx} in sequence" | \ - $WGRIB2 -i $GRIBIN -grib temp.grib2 > wgrib.out 2>&1 - $WGRIB2 temp.grib2 -append -grib $GRIBOUT + #shellcheck disable=SC2312 + ${WGRIB2} "${GRIBIN}" -s | grep ":${prepar}" | grep "${paridx} in sequence" | \ + ${WGRIB2} -i "${GRIBIN}" -grib temp.grib2 > wgrib.out 2>&1 + ${WGRIB2} temp.grib2 -append -grib "${GRIBOUT}" fi - iparam=$(expr ${iparam} + 1) + iparam=$(( iparam + 1 )) done #end wave param loop #====================================================================== - GRIBIN=$RUNwave.$cycle.$grdID.f${fhr}.clipped.grib2 - GRIBIN_chk=$GRIBIN.idx + GRIBIN="${RUNwave}.${cycle}.${grdID}.f${fhr}.clipped.grib2" - ${NLN} $GRIBIN gribfile.$grdID.f${fhr} + ${NLN} "${GRIBIN}" "gribfile.${grdID}.f${fhr}" # # 1.d Input template files - parmfile=${PARMgfs}/wave/grib2_${RUNwave}.$grdOut.f${fhr} - if [ -f $parmfile ]; then - ${NLN} $parmfile awipsgrb.$grdID.f${fhr} + parmfile="${PARMgfs}/wave/grib2_${RUNwave}.${grdOut}.f${fhr}" + if [[ -f "${parmfile}" ]]; then + ${NLN} "${parmfile}" "awipsgrb.${grdID}.f${fhr}" else - echo '*** ERROR : NO template grib2_${RUNwave}.$grdID.f${fhr} *** ' - echo "$RUNwave $grdID $fhr prdgen $date $cycle : GRIB template file missing." >> $wavelog - err=3;export err;${errchk} || exit ${err} + echo "FATAL ERROR: NO template grib2_${RUNwave}.${grdID}.f${fhr}" + echo "${RUNwave} ${grdID} ${fhr} prdgen ${date} ${cycle} : GRIB template file missing." >> "${wavelog}" + err=3;export err;${errchk} || exit "${err}" fi # # 2. AWIPS product generation @@ -161,26 +177,25 @@ grids=${grids:-ak_10m at_10m ep_10m wc_10m glo_30m} # 2.a.1 Set up for tocgrib2 echo " Do set up for tocgrib2." set_trace - #AWIPSGRB=awipsgrib.$grdID.f${fhr} AWIPSGRB=awipsgrib # 2.a.2 Make GRIB index echo " Make GRIB index for tocgrib2." set_trace - $GRB2INDEX gribfile.$grdID.f${fhr} gribindex.$grdID.f${fhr} + ${GRB2INDEX} "gribfile.${grdID}.f${fhr}" "gribindex.${grdID}.f${fhr}" OK=$? 
- if [ "$OK" != '0' ]
+ if [[ ${OK} -ne 0 ]]
 then
- msg="ABNORMAL EXIT: ERROR IN grb2index MWW3 for grid $grdID"
+ msg="ABNORMAL EXIT: ERROR IN grb2index MWW3 for grid ${grdID}"
 #set +x
 echo ' '
 echo '******************************************** '
 echo '*** FATAL ERROR : ERROR IN grb2index MWW3 *** '
 echo '******************************************** '
 echo ' '
- echo $msg
+ echo "${msg}"
 #set_trace
- echo "$RUNwave $grdID prdgen $date $cycle : error in grbindex." >> $wavelog
+ echo "${RUNwave} ${grdID} prdgen ${date} ${cycle} : error in grbindex." >> "${wavelog}"
 err=4;export err;err_chk
 fi

@@ -192,13 +207,13 @@ grids=${grids:-ak_10m at_10m ep_10m wc_10m glo_30m}
 export pgmout=tocgrib2.out
 . prep_step

- export FORT11="gribfile.$grdID.f${fhr}"
- export FORT31="gribindex.$grdID.f${fhr}"
- export FORT51="$AWIPSGRB.$grdID.f${fhr}"
+ export FORT11="gribfile.${grdID}.f${fhr}"
+ export FORT31="gribindex.${grdID}.f${fhr}"
+ export FORT51="${AWIPSGRB}.${grdID}.f${fhr}"

- $TOCGRIB2 < awipsgrb.$grdID.f${fhr} > tocgrib2.out 2>&1
+ ${TOCGRIB2} < "awipsgrb.${grdID}.f${fhr}" > tocgrib2.out 2>&1
 OK=$?
- if [ "$OK" != '0' ]; then
+ if [[ ${OK} -ne 0 ]]; then
 cat tocgrib2.out
 msg="ABNORMAL EXIT: ERROR IN tocgrib2"
 #set +x
@@ -207,9 +222,9 @@ grids=${grids:-ak_10m at_10m ep_10m wc_10m glo_30m}
 echo '*** FATAL ERROR : ERROR IN tocgrib2 *** '
 echo '*************************************** '
 echo ' '
- echo $msg
+ echo "${msg}"
 #set_trace
- echo "$RUNwave prdgen $date $cycle : error in tocgrib2." >> $wavelog
+ echo "${RUNwave} prdgen ${date} ${cycle} : error in tocgrib2." >> "${wavelog}"
 err=5;export err;err_chk
 else
 echo '*** tocgrib2 ran successfully *** '
@@ -219,27 +234,27 @@ grids=${grids:-ak_10m at_10m ep_10m wc_10m glo_30m}
 echo " Get awips GRIB bulletins out ..."
 #set_trace
 #set +x
- echo " Saving $AWIPSGRB.$grdOut.f${fhr} as grib2.$cycle.awipsww3_${grdID}.f${fhr}"
+ echo " Saving ${AWIPSGRB}.${grdOut}.f${fhr} as grib2.${cycle}.awipsww3_${grdID}.f${fhr}"
 echo " in ${COMOUT_WAVE_WMO}"
 #set_trace
 cp "${AWIPSGRB}.${grdID}.f${fhr}" "${COMOUT_WAVE_WMO}/grib2.${cycle}.f${fhr}.awipsww3_${grdOut}"
 #set +x
- if [ "$SENDDBN" = 'YES' ]
+ if [[ "${SENDDBN}" == 'YES' ]]
 then
- echo " Sending $AWIPSGRB.$grdID.f${fhr} to DBRUN."
+ echo " Sending ${AWIPSGRB}.${grdID}.f${fhr} to DBRUN."
"${DBNROOT}/bin/dbn_alert" GRIB_LOW "${RUN}" "${job}" "${COMOUT_WAVE_WMO}/grib2.${cycle}.f${fhr}.awipsww3_${grdOut}" fi - rm -f $AWIPSGRB.$grdID.f${fhr} tocgrib2.out + rm -f "${AWIPSGRB}.${grdID}.f${fhr}" tocgrib2.out done # For grids - if [ $fhcnt -ge $FHMAX_HF_WAV ]; then - inc=$FHOUT_WAV + if [[ ${fhcnt} -ge ${FHMAX_HF_WAV} ]]; then + inc="${FHOUT_WAV}" else - inc=$FHOUT_HF_WAV + inc="${FHOUT_HF_WAV}" fi - let fhcnt=fhcnt+inc + ((fhcnt = fhcnt+inc)) done #For fcst time diff --git a/scripts/exglobal_archive.py b/scripts/exglobal_archive.py index ae613fb39c..df49704e06 100755 --- a/scripts/exglobal_archive.py +++ b/scripts/exglobal_archive.py @@ -40,7 +40,7 @@ def main(): 'NMEM_ENS', 'DO_JEDIATMVAR', 'DO_VRFY_OCEANDA', 'FHMAX_FITS', 'waveGRD', 'IAUFHRS', 'DO_FIT2OBS', 'NET', 'FHOUT_HF_GFS', 'FHMAX_HF_GFS', 'REPLAY_ICS', 'OFFSET_START_HOUR', 'ARCH_EXPDIR', 'EXPDIR', 'ARCH_EXPDIR_FREQ', 'ARCH_HASHES', - 'ARCH_DIFFS', 'SDATE', 'EDATE', 'HOMEgfs'] + 'ARCH_DIFFS', 'SDATE', 'EDATE', 'HOMEgfs', 'DO_GEMPAK'] archive_dict = AttrDict() for key in keys: diff --git a/sorc/gfs_utils.fd b/sorc/gfs_utils.fd index 4848ecbb5e..6274ae7b2e 160000 --- a/sorc/gfs_utils.fd +++ b/sorc/gfs_utils.fd @@ -1 +1 @@ -Subproject commit 4848ecbb5e713b16127433e11f7d3edc6ac784c4 +Subproject commit 6274ae7b2eb6cd3d3ad02b5ad3a16b7d9be1a496 diff --git a/sorc/link_workflow.sh b/sorc/link_workflow.sh index b70b9e894f..1988fe60f6 100755 --- a/sorc/link_workflow.sh +++ b/sorc/link_workflow.sh @@ -167,9 +167,6 @@ cd "${HOMEgfs}/ush" || exit 8 for file in emcsfc_ice_blend.sh global_cycle_driver.sh emcsfc_snow.sh global_cycle.sh; do ${LINK_OR_COPY} "${HOMEgfs}/sorc/ufs_utils.fd/ush/${file}" . done -for file in make_ntc_bull.pl make_NTC_file.pl make_tif.sh month_name.sh; do - ${LINK_OR_COPY} "${HOMEgfs}/sorc/gfs_utils.fd/ush/${file}" . -done # Link these templates from ufs-weather-model cd "${HOMEgfs}/parm/ufs" || exit 1 diff --git a/ush/gfs_bufr.sh b/ush/gfs_bufr.sh index 0a7a8e8522..14b9540308 100755 --- a/ush/gfs_bufr.sh +++ b/ush/gfs_bufr.sh @@ -21,16 +21,17 @@ # 2024-08-08 Bo Cui: Update to handle one forecast at a time # echo "History: February 2003 - First implementation of this utility script" # -fhr="$1" -fhr_p="$2" -FINT="$3" -F00FLAG="$4" -workdir="$5" - -cd "${workdir}" || exit 2 source "${USHgfs}/preamble.sh" +fhr="${1}" +fhr_p="${2}" +FINT="${3}" +F00FLAG="${4}" +workdir="${5}" + +cd "${workdir}" || exit 2 + if [[ "${F00FLAG}" == "YES" ]]; then f00flag=".true." else diff --git a/ush/make_NTC_file.pl b/ush/make_NTC_file.pl new file mode 100755 index 0000000000..62905f6aae --- /dev/null +++ b/ush/make_NTC_file.pl @@ -0,0 +1,118 @@ +#!/usr/bin/perl +# +#------------------------------------------------------ +# +# This is make_NTC_file.pl +# It attaches the appropriate headers to the input file +# and copies it to a unique name for input to NTC. +# +# The following lines are prepended to the file: +# 1. A Bulletin Flag Field Seperator +# 2. A WMO header line +# 3. An optional subheader, e.g. DIFAX1064 +# +# Input wmoheader Originator datetime path +# where: +# wmoheader - WMO id to use in WMO header. +# subheader - "NONE" if none. 
+# Originator - Originator to use in WMO header
+# datetime - date/time to use in WMO header, yyyymmddhh
+# path - name input file
+# output_path - name of output file
+#
+# Author: Paula Freeman based on script by Larry Sager
+#
+#------------------------------------------------------
+
+$NArgs = @ARGV;
+
+if ($NArgs < 6) {
+ usage ();
+ exit;
+}
+
+#
+# Get input
+#
+
+$WMOHeader=shift;
+$Origin=shift;
+$YYYYMMDDHH=shift;
+$SubHeader=shift;
+$Filename=shift;
+$OutputFilename=shift;
+
+print "Filename is $Filename\n";
+print "Output Filename is $OutputFilename\n";
+$YYYYMMDDHH =~ /\d{4}(\d{2})(\d{4})/;
+$MMDDHH = $1 . $2;
+$DDHHMM = $2 . "00";
+print "WMOHeader = $WMOHeader\n";
+print "SubHeader = $SubHeader\n";
+print "Origin = $Origin\n";
+
+
+if ( ($WMOHeader eq "") || ($Origin eq "") || ($YYYYMMDDHH eq "") || ($Filename eq "") || ($OutputFilename eq "") || ($SubHeader eq "") ) {
+ usage ();
+ exit;
+}
+
+#
+# Create the file for TOC
+#
+
+ make_toc ();
+#
+#
+
+
+sub usage () {
+ print "Usage: $0 <wmoheader> <Originator> <datetime> <subheader> <path> <output_path>\n";
+}
+
+sub make_toc {
+
+#
+# Attach WMO header and subheader (if not "NONE").
+# Get the bytecount of file to insert into the Bulletin Flag Field Separator.
+# Add in length of WMO header, plus two carriage returns and line feed.
+# If Subheader specified, count that in also, plus a line feed.
+#
+
+ $Header = "$WMOHeader $Origin $DDHHMM";
+ $ByteCount = `wc -c $Filename | cut -c1-8`;
+ $ByteCount= $ByteCount + length($Header) + 3;
+ if ($SubHeader =~ /NONE/) {
+ print "No Subheader\n";
+ } else {
+ if ($SubHeader =~ /IMAG/){
+ $ByteCount = $ByteCount + length($SubHeader);
+ } else {
+ $ByteCount = $ByteCount + length($SubHeader) + 3;
+ }
+ }
+ $BulletinFlagFieldSep = sprintf( "****%10.10d****", $ByteCount);
+
+ open(OUTFILE, ">$OutputFilename") or die "Cannot open $OutputFilename for output.";
+ print OUTFILE "$BulletinFlagFieldSep\n";
+ print OUTFILE "$Header\r\r\n";
+ if ($SubHeader =~ /NONE/) {
+ print "No Subheader\n";
+ } else {
+ if ($SubHeader =~ /IMAG/){
+ print OUTFILE "$SubHeader";
+ } else {
+ print OUTFILE "$SubHeader\r\r\n";
+ }
+ }
+ open (INFILE, $Filename) or die "Cannot open $Filename";
+
+ while ($rec=<INFILE>) {
+ print OUTFILE $rec;
+ }
+
+ close INFILE;
+ close OUTFILE;
+
+ print "$Filename -> $OutputFilename\n";
+}
diff --git a/ush/make_ntc_bull.pl b/ush/make_ntc_bull.pl
new file mode 100755
index 0000000000..6dc9e29231
--- /dev/null
+++ b/ush/make_ntc_bull.pl
@@ -0,0 +1,242 @@
+#!/usr/bin/perl
+#
+#------------------------------------------------------
+#
+# This is make_ntc_bull.pl
+# It attaches the appropriate headers to the input file
+# and copies it to a unique name for input to NTC.
+#
+# A Bulletin Flag Field Separator is prepended to the
+# text bulletin. This TOC header contains the total
+# number of bytes in the product not counting the
+# bulletin flag field separator.
+#
+# Input:
+# File identifier - Output name identifier.
+# subheader - "NONE" if none.
+# Originator - Not used currently
+# datetime - Not used currently
+# filename - input file name
+# output_path - name of output file
+#
+# Author: Larry Sager based on a script by Paula Freeman
+#
+# 31 Oct 05 -- new script
+#
+#------------------------------------------------------
+
+if ($ENV{job}) { $job=$ENV{job}; }
+if ($ENV{SENDDBN}) { $SENDDBN=$ENV{SENDDBN}; }
+$NArgs = @ARGV;
+
+if ($NArgs < 6) {
+ usage ();
+ exit;
+}
+
+#
+# Get input
+#
+
+$NAME=shift;
+$WMOname=shift;
+$ORIGname=shift;
+$DATEname=shift;
+$Filename=shift;
+$OutputFilename=shift;
+print " Input : $Filename";
+print " Output: $OutputFilename";
+
+
+if ( ($Filename eq "") || ($OutputFilename eq "") ) {
+ usage ();
+ exit;
+}
+
+#
+# Create the file for TOC
+#
+ if ( $NAME eq "plot" ) {
+ make_tocplot ();
+ }
+ elsif ($NAME eq "redb" ) {
+ make_tocredb ();
+ }
+ else {
+ make_tocbull ();
+ }
+#
+#
+
+
+sub usage () {
+ print "Usage: $0 <name> <wmoheader> <origin> <datetime> <inputfile> <outputfile>\n";
+}
+
+sub make_tocbull {
+
+#
+# Attach WMO header
+# Get the bytecount of file to insert into the Bulletin Flag Field Separator.
+#
+
+ $ix = 0;
+ $under = "_";
+ open (INFILE, $Filename) or die "Cannot open $Filename";
+
+ while ($cho=<INFILE>) {
+ $rec = $rec . $cho;
+ }
+ $cho = $rec;
+ $cho =~ s/\n//g;
+ $cho =~ s/<<@@/\r\r\n/g;
+ $cho =~ s/<<@/\r\r\n/g;
+ $cho =~ s/<//g;
+ $cho =~ s/\^//g;
+ $cho =~ s/\$//g;
+ $cho =~ s/\|/+/g;
+ $value = 40;
+ $Outp="$OutputFilename";
+ open(OUTFILE, ">$Outp") or die "Cannot open $OutputFilename for output.";
+ while ($ix == 0) {
+ $cho = substr($cho,$value);
+ $value = 38;
+ $cho =~ s/'1/\&\&/;
+ $cho =~ s/'0/\&\&/;
+# print "cho is $cho";
+ ($cho2,$cho) = split(/\&\&/,$cho);
+ ($cho2,$cho3) = split(/\%/,$cho2);
+# print "cho2 is $cho2";
+ $ByteCount = length($cho2);
+ print " length is $ByteCount ";
+ $BulletinFlagFieldSep = sprintf( "****%10.10d****", $ByteCount);
+ if ($ByteCount > 50 ) {
+ print OUTFILE "$BulletinFlagFieldSep\n";
+ print OUTFILE $cho2;
+ }
+ else {
+ $ix = 1;
+ }
+ }
+ close OUTFILE;
+ if ($SENDDBN eq "YES" ) {
+# Modified 20051205 by wx11rp to ensure the current production machine is used.
+# $dbn_alert="/gpfs/w/nco/dbnet/bin/dbn_alert";
+ $dbn_alert=$ENV{'DBNROOT'} . "/bin/dbn_alert";
+ $type="GRIB_LOW";
+ $job2=$job;
+ $subtype=$ORIGname;
+ $file_path=$Outp;
+ @command = ($dbn_alert, $type, $subtype, $job2, $file_path);
+ if (system (@command) != 0) {
+ print "Error alerting: @command \n";
+ }
+ }
+
+ close INFILE;
+ close OUTFILE;
+
+ print "$Filename -> $OutputFilename\n";
+}
+
+sub make_tocplot {
+
+#
+# Attach WMO header
+# Get the bytecount of file to insert into the Bulletin Flag Field Separator.
+#
+
+ $ix = 0;
+ $under = "_";
+ open (INFILE, $Filename) or die "Cannot open $Filename";
+
+ while ($cho=<INFILE>) {
+ $rec = $rec . $cho;
+ }
+ $cho = $rec;
+# $Outp="$OutputFilename$under$job";
+ $Outp="$OutputFilename";
+ open(OUTFILE, ">$Outp") or die "Cannot open $OutputFilename for output.";
+ while ($ix == 0) {
+ $cho =~ s/\$\$/\&\&/;
+ ($cho2,$cho) = split(/\&\&/,$cho);
+# $cho2 =~ s/@/ /g;
+# $cho2 = $cho2 . " ";
+ $ByteCount = length($cho2);
+ print " length is $ByteCount ";
+ $BulletinFlagFieldSep = sprintf( "****%10.10d****", $ByteCount);
+ if ($ByteCount > 50 ) {
+ print OUTFILE "$BulletinFlagFieldSep\n";
+ print OUTFILE $cho2;
+ }
+ else {
+ $ix = 1;
+ }
+ }
+ close OUTFILE;
+ if ($SENDDBN eq "YES" ) {
+# 20051205 Modified by wx11rp to allow the script to run on any machine labeled as the production machine
+# $dbn_alert="/gpfs/w/nco/dbnet/bin/dbn_alert";
+ $dbn_alert=$ENV{'DBNROOT'} . 
"/bin/dbn_alert"; + $type="GRIB_LOW"; + $subtype=$DATEname; + $job2=$job; + $file_path=$Outp; + @command = ($dbn_alert, $type, $subtype, $job2, $file_path); + if (system (@command) != 0) { + print "Error alerting: @command \n"; + } + } + + close INFILE; + close OUTFILE; + + print "$Filename -> $OutputFilename\n"; +} +sub make_tocredb { + +# +# Prepare the Redbook graphic for transmission to TOC by removing the AWIPS +# header and creating an NTC header. Get the Bytecount of the file to +# insert into the Bulletin Flag Field Seperator. +# + + $ix = 0; + $under = "_"; + open (INFILE, $Filename) or die "Cannot open $Filename"; + + while ($cho=) { + $rec = $rec . $cho; + } + $cho = $rec; + $Outp="$OutputFilename"; + open(OUTFILE, ">$Outp") or die "Cannot open $OutputFilename for output."; + $cho = substr($cho,24); + $ByteCount = length($cho); + print " length is $ByteCount "; + $BulletinFlagFieldSep = sprintf( "****%10.10d****", $ByteCount); + if ($ByteCount > 50 ) { + print OUTFILE "$BulletinFlagFieldSep\n"; + print OUTFILE $cho; + } + close OUTFILE; + if ($SENDDBN eq "YES" ) { +# 20051205 Modified by wx11rp to allow the script to run on any manchine labeled as the production machine +# $dbn_alert="/gpfs/w/nco/dbnet/bin/dbn_alert"; + $dbn_alert=$ENV{'DBNROOT'} . "/bin/dbn_alert"; + $type="GRIB_LOW"; + $subtype=$DATEname; + $job2=$job; + $file_path=$Outp; + @command = ($dbn_alert, $type, $subtype, $job2, $file_path); + if (system (@command) != 0) { + print "Error alerting: @command \n"; + } + } + + close INFILE; + close OUTFILE; + + print "$Filename -> $OutputFilename\n"; +} diff --git a/ush/make_tif.sh b/ush/make_tif.sh new file mode 100755 index 0000000000..633c4ded0c --- /dev/null +++ b/ush/make_tif.sh @@ -0,0 +1,31 @@ +#! /usr/bin/env bash + +source "${HOMEgfs}/ush/preamble.sh" + +cd "${DATA}" || exit 2 + +outname=out.tif + +convert gif:"${input}" fax:"${outname}" + +# +# Add the ntc heading: +# + +WMO=QTUA11 +ORIG=KWBC +PDYHH="${PDY}${cyc}" + +if [[ "${HEADER}" == "YES" ]]; then + INPATH="${DATA}/${outname}" + SUB=DFAX1064 + "${HOMEgfs}/ush/make_NTC_file.pl" "${WMO}" "${ORIG}" "${PDYHH}" "${SUB}" "${INPATH}" "${OUTPATH}" +# +# Send the graphic to TOC + +cp "${OUTPATH}" "${COMOUT_ATMOS_WMO}/gfs_500_hgt_tmp_nh_anl_${cyc}.tif" + if [[ "${SENDDBN}" == "YES" ]]; then + + "${DBNROOT}/bin/dbn_alert" GRIB_LOW "${NET}" "${job}" "${COMOUT_ATMOS_WMO}/gfs_500_hgt_tmp_nh_anl_${cyc}.tif" + fi +fi diff --git a/ush/month_name.sh b/ush/month_name.sh new file mode 100755 index 0000000000..463e4a08be --- /dev/null +++ b/ush/month_name.sh @@ -0,0 +1,113 @@ +#!/bin/bash + +#################################################################### +# +# SCRIPT: month_name.sh +# +# This script returns the name/abreviation of a month +# in a small text file, month_name.txt. It also echos the +# name/abreviation to stdout. The form of the returned +# name/abreviation is specified by the script arguments. 
+# +# USAGE: ./month_name.sh < month > < monthspec> +# +# EXAMPLE: ./month_name.sh 5 MON +# +# month spec contents of month_name.txt +# ----------- ------ ---------------------------- +# +# 6/06 Mon Jun +# 8/08 Month August +# 9/09 MON SEP +# 11 MONTH NOVEMBER +# +# +# Note: Variables may be assigned the value of the returned name +# by either of the following methods: +# +# MM=`cat month_name.txt` after executing month_name.sh +# - OR - +# MM=`month_name.sh 5 MON` (for example) +# +# +# +# HISTORY: 07/08/2005 - Original script +# +# +#################################################################### + + + typeset -Z2 month_num + + + month_num=$1 + month_spec=$2 + + case ${month_num} in + + 01) Mon=Jan + Month=January ;; + + 02) Mon=Feb + Month=February ;; + + 03) Mon=Mar + Month=March ;; + + 04) Mon=Apr + Month=April ;; + + 05) Mon=May + Month=May ;; + + 06) Mon=Jun + Month=June ;; + + 07) Mon=Jul + Month=July ;; + + 08) Mon=Aug + Month=August ;; + + 09) Mon=Sep + Month=September ;; + + 10) Mon=Oct + Month=October ;; + + 11) Mon=Nov + Month=November ;; + + 12) Mon=Dec + Month=December ;; + + *) + echo "FATAL ERROR input month number (${month_num}) is invalid" + exit 2 + + esac + + + if [[ "${month_spec}" == "Mon" ]]; then + + echo "${Mon}" + echo "${Mon}" > month_name.txt + + elif [[ "${month_spec}" == "Month" ]]; then + + echo "${Month}" + echo "${Month}" > month_name.txt + + elif [[ "${month_spec}" == "MON" ]]; then + + MON="${Mon^^}" + echo "${MON}" + echo "${MON}" > month_name.txt + + elif [[ "${month_spec}" == "MONTH" ]]; then + + MONTH="${Month^^}" + echo "${MONTH}" + echo "${MONTH}" > month_name.txt + + fi diff --git a/versions/run.wcoss2.ver b/versions/run.wcoss2.ver index f5b11b3a6f..6165bb37e9 100644 --- a/versions/run.wcoss2.ver +++ b/versions/run.wcoss2.ver @@ -27,6 +27,7 @@ export jasper_ver=2.0.25 export zlib_ver=1.2.11 export libpng_ver=1.6.37 export cdo_ver=1.9.8 +export imagemagick_ver=7.0.8-7 export hdf5_ver=1.10.6 export netcdf_ver=4.7.4 diff --git a/workflow/applications/gfs_cycled.py b/workflow/applications/gfs_cycled.py index 4df03b9444..45a7bccc7a 100644 --- a/workflow/applications/gfs_cycled.py +++ b/workflow/applications/gfs_cycled.py @@ -119,7 +119,7 @@ def _get_app_configs(self, run): configs += ['postsnd'] if options['do_awips']: - configs += ['awips'] + configs += ['awips', 'fbwind'] if options['do_wave']: configs += ['waveinit', 'waveprep', 'wavepostsbs', 'wavepostpnt'] @@ -281,12 +281,9 @@ def get_task_names(self): task_names[run] += ['postsnd'] if options['do_gempak']: - task_names[run] += ['gempak'] - task_names[run] += ['gempakmeta'] - task_names[run] += ['gempakncdcupapgif'] + task_names[run] += ['gempak', 'gempakmeta', 'gempakncdcupapgif'] if options['do_goes']: - task_names[run] += ['npoess_pgrb2_0p5deg'] - task_names[run] += ['gempakpgrb2spec'] + task_names[run] += ['npoess_pgrb2_0p5deg', 'gempakpgrb2spec'] if options['do_awips']: task_names[run] += ['awips_20km_1p0deg', 'fbwind'] diff --git a/workflow/applications/gfs_forecast_only.py b/workflow/applications/gfs_forecast_only.py index fffdab6ef9..5b397c105b 100644 --- a/workflow/applications/gfs_forecast_only.py +++ b/workflow/applications/gfs_forecast_only.py @@ -61,7 +61,7 @@ def _get_app_configs(self, run): configs += ['gempak'] if options['do_awips']: - configs += ['awips'] + configs += ['awips', 'fbwind'] if options['do_ocean'] or options['do_ice']: configs += ['oceanice_products'] @@ -136,7 +136,7 @@ def get_task_names(self): tasks += ['postsnd'] if options['do_gempak']: - tasks += ['gempak', 
'gempakmeta', 'gempakncdcupapgif', 'gempakpgrb2spec'] + tasks += ['gempak', 'gempakmeta'] if options['do_awips']: tasks += ['awips_20km_1p0deg', 'fbwind'] diff --git a/workflow/hosts/hera.yaml b/workflow/hosts/hera.yaml index e9e749ad3c..b95d0abb8d 100644 --- a/workflow/hosts/hera.yaml +++ b/workflow/hosts/hera.yaml @@ -24,6 +24,7 @@ LOCALARCH: 'NO' ATARDIR: '/NCEPDEV/${HPSS_PROJECT}/1year/${USER}/${machine}/scratch/${PSLOT}' MAKE_NSSTBUFR: 'NO' MAKE_ACFTBUFR: 'NO' +DO_AWIPS: 'NO' SUPPORTED_RESOLUTIONS: ['C1152', 'C768', 'C384', 'C192', 'C96', 'C48'] COMINecmwf: /scratch1/NCEPDEV/global/glopara/data/external_gempak/ecmwf COMINnam: /scratch1/NCEPDEV/global/glopara/data/external_gempak/nam diff --git a/workflow/hosts/hercules.yaml b/workflow/hosts/hercules.yaml index f528761cf1..a2974377dd 100644 --- a/workflow/hosts/hercules.yaml +++ b/workflow/hosts/hercules.yaml @@ -26,6 +26,7 @@ MAKE_NSSTBUFR: 'NO' MAKE_ACFTBUFR: 'NO' DO_TRACKER: 'NO' DO_GENESIS: 'NO' +DO_AWIPS: 'NO' SUPPORTED_RESOLUTIONS: ['C1152', 'C768', 'C384', 'C192', 'C96', 'C48'] COMINecmwf: /work/noaa/global/glopara/data/external_gempak/ecmwf COMINnam: /work/noaa/global/glopara/data/external_gempak/nam diff --git a/workflow/hosts/orion.yaml b/workflow/hosts/orion.yaml index 985c24c6fb..9b1a908e2c 100644 --- a/workflow/hosts/orion.yaml +++ b/workflow/hosts/orion.yaml @@ -26,6 +26,7 @@ MAKE_NSSTBUFR: 'NO' MAKE_ACFTBUFR: 'NO' DO_TRACKER: 'NO' DO_GENESIS: 'NO' +DO_AWIPS: 'NO' SUPPORTED_RESOLUTIONS: ['C1152', 'C768', 'C384', 'C192', 'C96', 'C48'] COMINecmwf: /work/noaa/global/glopara/data/external_gempak/ecmwf COMINnam: /work/noaa/global/glopara/data/external_gempak/nam diff --git a/workflow/rocoto/gfs_tasks.py b/workflow/rocoto/gfs_tasks.py index d2a3e43719..6cbf6bdb1f 100644 --- a/workflow/rocoto/gfs_tasks.py +++ b/workflow/rocoto/gfs_tasks.py @@ -1410,7 +1410,7 @@ def fbwind(self): deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps, dep_condition='and') - resources = self.get_resource('awips') + resources = self.get_resource('fbwind') # TODO: It would be better to use task dependencies on the # individual post jobs rather than data dependencies to avoid # prematurely starting with partial files. 
Unfortunately, the
@@ -2349,13 +2349,14 @@ def cleanup(self):
 elif self.run in ['gfs']:
 dep_dict = {'type': 'task', 'name': f'{self.run}_gempakmeta'}
 deps.append(rocoto.add_dependency(dep_dict))
- dep_dict = {'type': 'task', 'name': f'{self.run}_gempakncdcupapgif'}
- deps.append(rocoto.add_dependency(dep_dict))
- if self.options['do_goes']:
- dep_dict = {'type': 'metatask', 'name': f'{self.run}_gempakgrb2spec'}
- deps.append(rocoto.add_dependency(dep_dict))
- dep_dict = {'type': 'task', 'name': f'{self.run}_npoess_pgrb2_0p5deg'}
+ if self.app_config.mode in ['cycled']:
+ dep_dict = {'type': 'task', 'name': f'{self.run}_gempakncdcupapgif'}
 deps.append(rocoto.add_dependency(dep_dict))
+ if self.options['do_goes']:
+ dep_dict = {'type': 'task', 'name': f'{self.run}_npoess_pgrb2_0p5deg'}
+ deps.append(rocoto.add_dependency(dep_dict))
+ dep_dict = {'type': 'metatask', 'name': f'{self.run}_gempakgrb2spec'}
+ deps.append(rocoto.add_dependency(dep_dict))
 if self.options['do_metp'] and self.run in ['gfs']:
 deps2 = []

From e27f5df5bbe22323ebc583c1407259d8879a9b8e Mon Sep 17 00:00:00 2001
From: Walter Kolczynski - NOAA
Date: Tue, 14 Jan 2025 19:08:13 -0500
Subject: [PATCH 24/33] Reinstate product groups (#3208)

Testing with full-sized GEFS found that the sheer number of tasks overloads rocoto, resulting in `rocotorun` taking over 10 min to complete or hanging entirely. To reduce the number of tasks, product groups are reimplemented so that multiple forecast hours are processed in a single task. However, the implementation is a little different from the previous one.

The jobs where groups are enabled (atmos_products, oceanice_products, wavepostsbs, atmos_ensstat, and gempak) have a new variable, `MAX_TASKS`, that controls how many groups to use. This setting is currently *per member*. The forecast hours to be processed are then divided into this many groups as evenly as possible without crossing forecast segment boundaries. The walltime for those jobs is then multiplied by the number of forecast hours in the largest group.

For the gridded wave post job, the dependencies were also updated to trigger off of either the data being available or the appropriate segment completing (the dependencies had not been updated when the job was initially broken into fhrs).

A number of helper methods are added to Tasks to determine these groups and make a standard metatask variable dict in a centralized location. There is also a function to multiply the walltime, but this may be better off relocated to wxflow with the other time functions.

As part of switching from a single value to a list, hours are no longer passed by rocoto as zero-padded values. The lists are comma-delimited (without spaces) and split apart in the job stub (`jobs/rocoto/*`), so each j-job call still handles a single forecast hour.

The offline post (upp) job is not broken into groups, since it really isn't used outside the analysis anymore.
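To make the splitting concrete, here is a condensed, standalone sketch of the grouping idea (the real implementation is `Tasks.get_job_groups` in the `workflow/rocoto/tasks.py` hunk below; the function name and the example numbers here are illustrative only):

```python
import numpy as np
from bisect import bisect_right

def split_fhrs(fhrs, ngroups, breakpoints=()):
    """Split forecast hours into ngroups without crossing segment breakpoints."""
    # Cut the hour list at each segment boundary first
    segs = np.array_split(fhrs, [bisect_right(fhrs, b) for b in breakpoints if b < max(fhrs)])
    counts = [1] * len(segs)
    # Hand each spare group to the segment currently averaging the most hours per group
    for _ in range(ngroups - len(segs)):
        loads = [len(s) / c for s, c in zip(segs, counts)]
        counts[loads.index(max(loads))] += 1
    # Split each segment into its allotted number of groups and flatten
    return [g.tolist() for s, c in zip(segs, counts) for g in np.array_split(s, c)]

# 0-120 at 6-hourly output, MAX_TASKS=4, one segment boundary at f048:
print(split_fhrs(list(range(0, 121, 6)), 4, breakpoints=[48]))
# -> [[0, 6, 12, 18, 24], [30, 36, 42, 48],
#     [54, 60, 66, 72, 78, 84], [90, 96, 102, 108, 114, 120]]
```

In this example no group straddles the f048 boundary, and the walltime multiplier would be 6, the size of the largest group.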
Resolves #2999 Resolves #3210 --- jobs/rocoto/atmos_ensstat.sh | 21 ++-- jobs/rocoto/atmos_products.sh | 23 ++-- jobs/rocoto/gempak.sh | 19 +++- jobs/rocoto/oceanice_products.sh | 21 ++-- jobs/rocoto/wavepostsbs.sh | 21 ++-- parm/config/gefs/config.atmos_ensstat | 3 + parm/config/gefs/config.atmos_products | 4 +- parm/config/gefs/config.oceanice_products | 4 +- parm/config/gefs/config.resources | 10 +- parm/config/gefs/config.wavepostsbs | 3 + parm/config/gfs/config.atmos_products | 4 +- parm/config/gfs/config.gempak | 5 +- parm/config/gfs/config.oceanice_products | 3 + parm/config/gfs/config.resources | 8 +- parm/config/gfs/config.wavepostsbs | 3 + workflow/rocoto/gefs_tasks.py | 111 ++++++++++-------- workflow/rocoto/gfs_tasks.py | 84 ++++++++------ workflow/rocoto/tasks.py | 126 ++++++++++++++++++++- workflow/tests/__init__.py | 0 workflow/{ => tests}/test_configuration.py | 0 workflow/{ => tests}/test_hosts.py | 0 workflow/tests/test_tasks.py | 84 ++++++++++++++ 22 files changed, 431 insertions(+), 126 deletions(-) create mode 100644 workflow/tests/__init__.py rename workflow/{ => tests}/test_configuration.py (100%) rename workflow/{ => tests}/test_hosts.py (100%) create mode 100644 workflow/tests/test_tasks.py diff --git a/jobs/rocoto/atmos_ensstat.sh b/jobs/rocoto/atmos_ensstat.sh index 76ed7f0a72..617cbd77f8 100755 --- a/jobs/rocoto/atmos_ensstat.sh +++ b/jobs/rocoto/atmos_ensstat.sh @@ -13,13 +13,20 @@ status=$? if (( status != 0 )); then exit "${status}"; fi export job="atmos_ensstat" -export jobid="${job}.$$" -export FORECAST_HOUR=$(( 10#${FHR3} )) +# shellcheck disable=SC2153 +IFS=', ' read -r -a fhr_list <<< "${FHR_LIST}" -############################################################### -# Execute the JJOB -############################################################### -"${HOMEgfs}/jobs/JGLOBAL_ATMOS_ENSSTAT" +export FORECAST_HOUR jobid +for FORECAST_HOUR in "${fhr_list[@]}"; do + fhr3=$(printf '%03d' "${FORECAST_HOUR}") + jobid="${job}_f${fhr3}.$$" + ############################################################### + # Execute the JJOB + ############################################################### + "${HOMEgfs}/jobs/JGLOBAL_ATMOS_ENSSTAT" + status=$? + [[ ${status} -ne 0 ]] && exit "${status}" +done -exit $? +exit 0 diff --git a/jobs/rocoto/atmos_products.sh b/jobs/rocoto/atmos_products.sh index f6adbcf861..947b06dfc2 100755 --- a/jobs/rocoto/atmos_products.sh +++ b/jobs/rocoto/atmos_products.sh @@ -13,15 +13,20 @@ status=$? if (( status != 0 )); then exit "${status}"; fi export job="atmos_products" -export jobid="${job}.$$" -# Negatation needs to be before the base -fhr3_base="10#${FHR3}" -export FORECAST_HOUR=$(( ${fhr3_base/10#-/-10#} )) +# shellcheck disable=SC2153 +IFS=', ' read -r -a fhr_list <<< "${FHR_LIST}" -############################################################### -# Execute the JJOB -############################################################### -"${HOMEgfs}/jobs/JGLOBAL_ATMOS_PRODUCTS" +export FORECAST_HOUR jobid +for FORECAST_HOUR in "${fhr_list[@]}"; do + fhr3=$(printf '%03d' "${FORECAST_HOUR}") + jobid="${job}_f${fhr3}.$$" + ############################################################### + # Execute the JJOB + ############################################################### + "${HOMEgfs}/jobs/JGLOBAL_ATMOS_PRODUCTS" + status=$? + [[ ${status} -ne 0 ]] && exit "${status}" +done -exit $? 
+exit 0 diff --git a/jobs/rocoto/gempak.sh b/jobs/rocoto/gempak.sh index f5aea2379d..dc1d3f2621 100755 --- a/jobs/rocoto/gempak.sh +++ b/jobs/rocoto/gempak.sh @@ -6,11 +6,20 @@ status=$? if (( status != 0 )); then exit "${status}"; fi export job="gempak" -export jobid="${job}.$$" +# shellcheck disable=SC2153 +IFS=', ' read -r -a fhr_list <<< "${FHR_LIST}" -# Execute the JJOB -"${HOMEgfs}/jobs/J${RUN^^}_ATMOS_GEMPAK" +export FHR3 jobid +for fhr in "${fhr_list[@]}"; do + FHR3=$(printf '%03d' "${fhr}") + jobid="${job}_f${FHR3}.$$" + ############################################################### + # Execute the JJOB + ############################################################### + "${HOMEgfs}/jobs/J${RUN^^}_ATMOS_GEMPAK" + status=$? + [[ ${status} -ne 0 ]] && exit "${status}" +done -status=$? -exit "${status}" +exit 0 diff --git a/jobs/rocoto/oceanice_products.sh b/jobs/rocoto/oceanice_products.sh index 2a3b617d05..c3e03cea1a 100755 --- a/jobs/rocoto/oceanice_products.sh +++ b/jobs/rocoto/oceanice_products.sh @@ -13,13 +13,20 @@ status=$? if (( status != 0 )); then exit "${status}"; fi export job="oceanice_products" -export jobid="${job}.$$" -export FORECAST_HOUR=$(( 10#${FHR3} )) +# shellcheck disable=SC2153 +IFS=', ' read -r -a fhr_list <<< "${FHR_LIST}" -############################################################### -# Execute the JJOB -############################################################### -"${HOMEgfs}/jobs/JGLOBAL_OCEANICE_PRODUCTS" +export FORECAST_HOUR jobid +for FORECAST_HOUR in "${fhr_list[@]}"; do + fhr3=$(printf '%03d' "${FORECAST_HOUR}") + jobid="${job}_${COMPONENT}_f${fhr3}.$$" + ############################################################### + # Execute the JJOB + ############################################################### + "${HOMEgfs}/jobs/JGLOBAL_OCEANICE_PRODUCTS" + status=$? + [[ ${status} -ne 0 ]] && exit "${status}" +done -exit $? +exit 0 diff --git a/jobs/rocoto/wavepostsbs.sh b/jobs/rocoto/wavepostsbs.sh index f4789210d8..ff81c2a9d3 100755 --- a/jobs/rocoto/wavepostsbs.sh +++ b/jobs/rocoto/wavepostsbs.sh @@ -5,17 +5,24 @@ source "${HOMEgfs}/ush/preamble.sh" ############################################################### # Source FV3GFS workflow modules #. ${HOMEgfs}/ush/load_fv3gfs_modules.sh -. ${HOMEgfs}/ush/load_ufswm_modules.sh +source "${HOMEgfs}/ush/load_ufswm_modules.sh" status=$? -[[ ${status} -ne 0 ]] && exit ${status} +[[ ${status} -ne 0 ]] && exit "${status}" export job="wavepostsbs" -export jobid="${job}.$$" ############################################################### -# Execute the JJOB -${HOMEgfs}/jobs/JGLOBAL_WAVE_POST_SBS -status=$? -[[ ${status} -ne 0 ]] && exit ${status} +# shellcheck disable=SC2153 +IFS=', ' read -r -a fhr_list <<< "${FHR_LIST}" + +export FHR3 jobid +for FORECAST_HOUR in "${fhr_list[@]}"; do + FHR3=$(printf '%03d' "${FORECAST_HOUR}") + jobid="${job}_f${FHR3}.$$" + # Execute the JJOB + "${HOMEgfs}/jobs/JGLOBAL_WAVE_POST_SBS" + status=$? + [[ ${status} -ne 0 ]] && exit "${status}" +done exit 0 diff --git a/parm/config/gefs/config.atmos_ensstat b/parm/config/gefs/config.atmos_ensstat index d371f75887..b542659523 100644 --- a/parm/config/gefs/config.atmos_ensstat +++ b/parm/config/gefs/config.atmos_ensstat @@ -5,6 +5,9 @@ echo "BEGIN: config.atmos_ensstat" +# Maximum number of rocoto tasks +export MAX_TASKS=25 + # Get task specific resources . 
"${EXPDIR}/config.resources" atmos_ensstat diff --git a/parm/config/gefs/config.atmos_products b/parm/config/gefs/config.atmos_products index e8aae324e1..d1f36a7bc9 100644 --- a/parm/config/gefs/config.atmos_products +++ b/parm/config/gefs/config.atmos_products @@ -8,8 +8,8 @@ echo "BEGIN: config.atmos_products" # Get task specific resources . "${EXPDIR}/config.resources" atmos_products -# No. of forecast hours to process in a single job -export NFHRS_PER_GROUP=3 +# Maximum number of rocoto tasks per member +export MAX_TASKS=25 # Scripts used by this job export INTERP_ATMOS_MASTERSH="${USHgfs}/interp_atmos_master.sh" diff --git a/parm/config/gefs/config.oceanice_products b/parm/config/gefs/config.oceanice_products index 3b8b064947..6bb604d0ca 100644 --- a/parm/config/gefs/config.oceanice_products +++ b/parm/config/gefs/config.oceanice_products @@ -9,7 +9,7 @@ source "${EXPDIR}/config.resources" oceanice_products export OCEANICEPRODUCTS_CONFIG="${PARMgfs}/post/oceanice_products_gefs.yaml" -# No. of forecast hours to process in a single job -export NFHRS_PER_GROUP=3 +# Maximum number of rocoto tasks per member +export MAX_TASKS=25 echo "END: config.oceanice_products" diff --git a/parm/config/gefs/config.resources b/parm/config/gefs/config.resources index 17033858c8..bb33f3eb02 100644 --- a/parm/config/gefs/config.resources +++ b/parm/config/gefs/config.resources @@ -234,6 +234,7 @@ case ${step} in ;; "atmos_products") + # Walltime is per forecast hour; will be multipled by group size export walltime="00:15:00" export ntasks=24 export threads_per_task=1 @@ -242,7 +243,8 @@ case ${step} in ;; "atmos_ensstat") - export walltime="00:30:00" + # Walltime is per forecast hour; will be multipled by group size + export walltime="00:15:00" export ntasks=6 export threads_per_task=1 export tasks_per_node="${ntasks}" @@ -250,6 +252,7 @@ case ${step} in ;; "oceanice_products") + # Walltime is per forecast hour; will be multipled by group size export walltime="00:15:00" export ntasks=1 export tasks_per_node=1 @@ -258,7 +261,8 @@ case ${step} in ;; "wavepostsbs") - export walltime="03:00:00" + # Walltime is per forecast hour; will be multipled by group size + export walltime="00:15:00" export ntasks=1 export threads_per_task=1 export tasks_per_node=$(( max_tasks_per_node / threads_per_task )) @@ -328,7 +332,7 @@ case ${step} in ;; "cleanup") - export walltime="00:15:00" + export walltime="00:30:00" export ntasks=1 export tasks_per_node=1 export threads_per_task=1 diff --git a/parm/config/gefs/config.wavepostsbs b/parm/config/gefs/config.wavepostsbs index 82cec321da..b43ea33d40 100644 --- a/parm/config/gefs/config.wavepostsbs +++ b/parm/config/gefs/config.wavepostsbs @@ -8,6 +8,9 @@ echo "BEGIN: config.wavepostsbs" # Get task specific resources source "${EXPDIR}/config.resources" wavepostsbs +# Maximum number of rocoto tasks per member +export MAX_TASKS=25 + # Subgrid info for grib2 encoding export WAV_SUBGRBSRC="" export WAV_SUBGRB="" diff --git a/parm/config/gfs/config.atmos_products b/parm/config/gfs/config.atmos_products index 451f5eff86..5b6e4067b5 100644 --- a/parm/config/gfs/config.atmos_products +++ b/parm/config/gfs/config.atmos_products @@ -8,8 +8,8 @@ echo "BEGIN: config.atmos_products" # Get task specific resources . "${EXPDIR}/config.resources" atmos_products -# No. 
of forecast hours to process in a single job
-export NFHRS_PER_GROUP=3
+# Maximum number of rocoto tasks per member
+export MAX_TASKS=25

 # Scripts used by this job
 export INTERP_ATMOS_MASTERSH="${USHgfs}/interp_atmos_master.sh"
diff --git a/parm/config/gfs/config.gempak b/parm/config/gfs/config.gempak
index 791770ba4a..db5e85af3f 100644
--- a/parm/config/gfs/config.gempak
+++ b/parm/config/gfs/config.gempak
@@ -5,7 +5,10 @@

 echo "BEGIN: config.gempak"

+# Maximum number of rocoto tasks per member
+export MAX_TASKS=25
+
 # Get task specific resources
-. $EXPDIR/config.resources gempak
+source "${EXPDIR}/config.resources" gempak

 echo "END: config.gempak"
diff --git a/parm/config/gfs/config.oceanice_products b/parm/config/gfs/config.oceanice_products
index 9e5c5b1c68..a618cbe10c 100644
--- a/parm/config/gfs/config.oceanice_products
+++ b/parm/config/gfs/config.oceanice_products
@@ -7,6 +7,9 @@ echo "BEGIN: config.oceanice_products"
 # Get task specific resources
 source "${EXPDIR}/config.resources" oceanice_products

+# Maximum number of rocoto tasks per member
+export MAX_TASKS=25
+
 export OCEANICEPRODUCTS_CONFIG="${PARMgfs}/post/oceanice_products.yaml"

 # No. of forecast hours to process in a single job
diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources
index 6a85a6de14..0eea92cbde 100644
--- a/parm/config/gfs/config.resources
+++ b/parm/config/gfs/config.resources
@@ -191,8 +191,9 @@ case ${step} in
 ;;

 "wavepostsbs")
- walltime_gdas="00:20:00"
- walltime_gfs="03:00:00"
+ # Walltime is per forecast hour; will be multiplied by group size
+ walltime_gdas="00:15:00"
+ walltime_gfs="00:15:00"
 ntasks=8
 threads_per_task=1
 tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
@@ -911,6 +912,7 @@ case ${step} in
 ;;

 "oceanice_products")
+ # Walltime is per forecast hour; will be multiplied by group size
 walltime="00:15:00"
 ntasks=1
 tasks_per_node=1
@@ -949,6 +951,7 @@ case ${step} in
 ;;

 "atmos_products")
+ # Walltime is per forecast hour; will be multiplied by group size
 walltime="00:15:00"
 ntasks=24
 threads_per_task=1
@@ -1278,6 +1281,7 @@ case ${step} in
 ;;

 "gempak")
+ # Walltime is per forecast hour; will be multiplied by group size
 walltime="00:30:00"
 ntasks_gdas=2
 ntasks_gfs=28
diff --git a/parm/config/gfs/config.wavepostsbs b/parm/config/gfs/config.wavepostsbs
index 82cec321da..b43ea33d40 100644
--- a/parm/config/gfs/config.wavepostsbs
+++ b/parm/config/gfs/config.wavepostsbs
@@ -8,6 +8,9 @@ echo "BEGIN: config.wavepostsbs"
 # Get task specific resources
 source "${EXPDIR}/config.resources" wavepostsbs

+# Maximum number of rocoto tasks per member
+export MAX_TASKS=25
+
 # Subgrid info for grib2 encoding
 export WAV_SUBGRBSRC=""
 export WAV_SUBGRB=""
diff --git a/workflow/rocoto/gefs_tasks.py b/workflow/rocoto/gefs_tasks.py
index ca29bcdf1e..f89d3dbbb0 100644
--- a/workflow/rocoto/gefs_tasks.py
+++ b/workflow/rocoto/gefs_tasks.py
@@ -190,39 +190,57 @@ def _atmosoceaniceprod(self, component: str):
 fhout_ice_gfs = self._configs['base']['FHOUT_ICE_GFS']
 products_dict = {'atmos': {'config': 'atmos_products',
 'history_path_tmpl': 'COM_ATMOS_MASTER_TMPL',
- 'history_file_tmpl': f'{self.run}.t@Hz.master.grb2f#fhr#'},
+ 'history_file_tmpl': f'{self.run}.t@Hz.master.grb2f#fhr3_last#'},
 'ocean': {'config': 'oceanice_products',
 'history_path_tmpl': 'COM_OCEAN_HISTORY_TMPL',
- 'history_file_tmpl': f'{self.run}.ocean.t@Hz.{fhout_ocn_gfs}hr_avg.f#fhr_next#.nc'},
+ 'history_file_tmpl': f'{self.run}.ocean.t@Hz.{fhout_ocn_gfs}hr_avg.f#fhr3_next#.nc'},
 'ice': {'config': 'oceanice_products', 
'history_path_tmpl': 'COM_ICE_HISTORY_TMPL', - 'history_file_tmpl': f'{self.run}.ice.t@Hz.{fhout_ice_gfs}hr_avg.f#fhr#.nc'}} + 'history_file_tmpl': f'{self.run}.ice.t@Hz.{fhout_ice_gfs}hr_avg.f#fhr3_last#.nc'}} component_dict = products_dict[component] config = component_dict['config'] history_path_tmpl = component_dict['history_path_tmpl'] history_file_tmpl = component_dict['history_file_tmpl'] + max_tasks = self._configs[config]['MAX_TASKS'] resources = self.get_resource(config) + fhrs = self._get_forecast_hours('gefs', self._configs[config], component) + + # when replaying, atmos component does not have fhr 0, therefore remove 0 from fhrs + is_replay = self._configs[config]['REPLAY_ICS'] + if is_replay and component in ['atmos'] and 0 in fhrs: + fhrs.remove(0) + + # ocean/ice components do not have fhr 0 as they are averaged output + if component in ['ocean', 'ice'] and 0 in fhrs: + fhrs.remove(0) + + fhr_var_dict = self.get_grouped_fhr_dict(fhrs=fhrs, ngroups=max_tasks) + + # Adjust walltime based on the largest group + largest_group = max([len(grp.split(',')) for grp in fhr_var_dict['fhr_list'].split(' ')]) + resources['walltime'] = Tasks.multiply_HMS(resources['walltime'], largest_group) + history_path = self._template_to_rocoto_cycstring(self._base[history_path_tmpl], {'MEMDIR': 'mem#member#'}) deps = [] data = f'{history_path}/{history_file_tmpl}' dep_dict = {'type': 'data', 'data': data, 'age': 120} deps.append(rocoto.add_dependency(dep_dict)) - dep_dict = {'type': 'metatask', 'name': 'gefs_fcst_mem#member#'} + dep_dict = {'type': 'task', 'name': 'gefs_fcst_mem#member#_#seg_dep#'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps, dep_condition='or') postenvars = self.envars.copy() postenvar_dict = {'ENSMEM': '#member#', 'MEMDIR': 'mem#member#', - 'FHR3': '#fhr#', + 'FHR_LIST': '#fhr_list#', 'COMPONENT': component} for key, value in postenvar_dict.items(): postenvars.append(rocoto.create_envar(name=key, value=str(value))) - task_name = f'gefs_{component}_prod_mem#member#_f#fhr#' + task_name = f'gefs_{component}_prod_mem#member#_#fhr_label#' task_dict = {'task_name': task_name, 'resources': resources, 'dependency': dependencies, @@ -233,22 +251,6 @@ def _atmosoceaniceprod(self, component: str): 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', 'maxtries': '&MAXTRIES;'} - fhrs = self._get_forecast_hours('gefs', self._configs[config], component) - - # when replaying, atmos component does not have fhr 0, therefore remove 0 from fhrs - is_replay = self._configs[config]['REPLAY_ICS'] - if is_replay and component in ['atmos'] and 0 in fhrs: - fhrs.remove(0) - - # ocean/ice components do not have fhr 0 as they are averaged output - if component in ['ocean', 'ice'] and 0 in fhrs: - fhrs.remove(0) - - fhr_var_dict = {'fhr': ' '.join([f"{fhr:03d}" for fhr in fhrs])} - if component in ['ocean']: - fhrs_next = fhrs[1:] + [fhrs[-1] + (fhrs[-1] - fhrs[-2])] - fhr_var_dict['fhr_next'] = ' '.join([f"{fhr:03d}" for fhr in fhrs_next]) - fhr_metatask_dict = {'task_name': f'gefs_{component}_prod_#member#', 'task_dict': task_dict, 'var_dict': fhr_var_dict} @@ -268,18 +270,32 @@ def atmos_ensstat(self): deps = [] for member in range(0, self.nmem + 1): - task = f'gefs_atmos_prod_mem{member:03d}_f#fhr#' + task = f'gefs_atmos_prod_mem{member:03d}_#fhr_label#' dep_dict = {'type': 'task', 'name': task} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) + fhrs = self._get_forecast_hours('gefs', 
self._configs['atmos_ensstat']) + + # when replaying, atmos component does not have fhr 0, therefore remove 0 from fhrs + is_replay = self._configs['atmos_ensstat']['REPLAY_ICS'] + if is_replay and 0 in fhrs: + fhrs.remove(0) + + max_tasks = self._configs['atmos_ensstat']['MAX_TASKS'] + fhr_var_dict = self.get_grouped_fhr_dict(fhrs=fhrs, ngroups=max_tasks) + + # Adjust walltime based on the largest group + largest_group = max([len(grp.split(',')) for grp in fhr_var_dict['fhr_list'].split(' ')]) + resources['walltime'] = Tasks.multiply_HMS(resources['walltime'], largest_group) + postenvars = self.envars.copy() - postenvar_dict = {'FHR3': '#fhr#'} + postenvar_dict = {'FHR_LIST': '#fhr_list#'} for key, value in postenvar_dict.items(): postenvars.append(rocoto.create_envar(name=key, value=str(value))) - task_name = f'gefs_atmos_ensstat_f#fhr#' + task_name = f'gefs_atmos_ensstat_#fhr_label#' task_dict = {'task_name': task_name, 'resources': resources, 'dependency': dependencies, @@ -290,15 +306,6 @@ def atmos_ensstat(self): 'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log', 'maxtries': '&MAXTRIES;'} - fhrs = self._get_forecast_hours('gefs', self._configs['atmos_ensstat']) - - # when replaying, atmos component does not have fhr 0, therefore remove 0 from fhrs - is_replay = self._configs['atmos_ensstat']['REPLAY_ICS'] - if is_replay and 0 in fhrs: - fhrs.remove(0) - - fhr_var_dict = {'fhr': ' '.join([f"{fhr:03d}" for fhr in fhrs])} - fhr_metatask_dict = {'task_name': f'gefs_atmos_ensstat', 'task_dict': task_dict, 'var_dict': fhr_var_dict} @@ -308,22 +315,43 @@ def atmos_ensstat(self): return task def wavepostsbs(self): + + wave_grid = self._configs['base']['waveGRD'] + history_path = self._template_to_rocoto_cycstring(self._base['COM_WAVE_HISTORY_TMPL'], {'MEMDIR': 'mem#member#'}) + history_file = f'/{self.run}wave.out_grd.{wave_grid}.@Y@m@d.@H@M@S' + deps = [] - dep_dict = {'type': 'metatask', 'name': f'gefs_fcst_mem#member#'} + dep_dict = {'type': 'data', 'data': [history_path, history_file], 'offset': [None, '#fhr3_next#:00:00']} deps.append(rocoto.add_dependency(dep_dict)) - dependencies = rocoto.create_dependency(dep=deps) + dep_dict = {'type': 'task', 'name': f'{self.run}_fcst_mem#member#_#seg_dep#'} + deps.append(rocoto.add_dependency(dep_dict)) + dependencies = rocoto.create_dependency(dep=deps, dep_condition='or') + + fhrs = self._get_forecast_hours('gefs', self._configs['wavepostsbs'], 'wave') + + # When using replay, output does not start until hour 3 + is_replay = self._configs['wavepostsbs']['REPLAY_ICS'] + if is_replay: + fhrs = [fhr for fhr in fhrs if fhr not in [0, 1, 2]] + + max_tasks = self._configs['wavepostsbs']['MAX_TASKS'] + fhr_var_dict = self.get_grouped_fhr_dict(fhrs=fhrs, ngroups=max_tasks) wave_post_envars = self.envars.copy() postenvar_dict = {'ENSMEM': '#member#', 'MEMDIR': 'mem#member#', - 'FHR3': '#fhr#', + 'FHR_LIST': '#fhr_list#', } for key, value in postenvar_dict.items(): wave_post_envars.append(rocoto.create_envar(name=key, value=str(value))) resources = self.get_resource('wavepostsbs') - task_name = f'gefs_wave_post_grid_mem#member#_f#fhr#' + # Adjust walltime based on the largest group + largest_group = max([len(grp.split(',')) for grp in fhr_var_dict['fhr_list'].split(' ')]) + resources['walltime'] = Tasks.multiply_HMS(resources['walltime'], largest_group) + + task_name = f'gefs_wave_post_grid_mem#member#_#fhr_label#' task_dict = {'task_name': task_name, 'resources': resources, 'dependency': dependencies, @@ -335,13 +363,6 @@ def wavepostsbs(self): 
'maxtries': '&MAXTRIES;' } - fhrs = self._get_forecast_hours('gefs', self._configs['wavepostsbs'], 'wave') - is_replay = self._configs['wavepostsbs']['REPLAY_ICS'] - if is_replay: - fhrs = [fhr for fhr in fhrs if fhr not in [0, 1, 2]] - - fhr_var_dict = {'fhr': ' '.join([f"{fhr:03d}" for fhr in fhrs])} - fhr_metatask_dict = {'task_name': f'gefs_wave_post_grid_#member#', 'task_dict': task_dict, 'var_dict': fhr_var_dict} diff --git a/workflow/rocoto/gfs_tasks.py b/workflow/rocoto/gfs_tasks.py index 6cbf6bdb1f..d709393f95 100644 --- a/workflow/rocoto/gfs_tasks.py +++ b/workflow/rocoto/gfs_tasks.py @@ -1029,7 +1029,7 @@ def atmanlupp(self): def atmanlprod(self): postenvars = self.envars.copy() - postenvar_dict = {'FHR3': '-001'} + postenvar_dict = {'FHR_LIST': '-1'} for key, value in postenvar_dict.items(): postenvars.append(rocoto.create_envar(name=key, value=str(value))) @@ -1126,21 +1126,36 @@ def _atmosoceaniceprod(self, component: str): products_dict = {'atmos': {'config': 'atmos_products', 'history_path_tmpl': 'COM_ATMOS_MASTER_TMPL', - 'history_file_tmpl': f'{self.run}.t@Hz.master.grb2f#fhr#'}, + 'history_file_tmpl': f'{self.run}.t@Hz.master.grb2f#fhr3_last#'}, 'ocean': {'config': 'oceanice_products', 'history_path_tmpl': 'COM_OCEAN_HISTORY_TMPL', - 'history_file_tmpl': f'{self.run}.ocean.t@Hz.6hr_avg.f#fhr_next#.nc'}, + 'history_file_tmpl': f'{self.run}.ocean.t@Hz.6hr_avg.f#fhr3_next#.nc'}, 'ice': {'config': 'oceanice_products', 'history_path_tmpl': 'COM_ICE_HISTORY_TMPL', - 'history_file_tmpl': f'{self.run}.ice.t@Hz.6hr_avg.f#fhr#.nc'}} + 'history_file_tmpl': f'{self.run}.ice.t@Hz.6hr_avg.f#fhr3_last#.nc'}} component_dict = products_dict[component] config = component_dict['config'] history_path_tmpl = component_dict['history_path_tmpl'] history_file_tmpl = component_dict['history_file_tmpl'] + max_tasks = self._configs[config]['MAX_TASKS'] + resources = self.get_resource(component_dict['config']) + + fhrs = self._get_forecast_hours(self.run, self._configs[config], component) + + # ocean/ice components do not have fhr 0 as they are averaged output + if component in ['ocean', 'ice'] and 0 in fhrs: + fhrs.remove(0) + + fhr_var_dict = self.get_grouped_fhr_dict(fhrs=fhrs, ngroups=max_tasks) + + # Adjust walltime based on the largest group + largest_group = max([len(grp.split(',')) for grp in fhr_var_dict['fhr_list'].split(' ')]) + resources['walltime'] = Tasks.multiply_HMS(resources['walltime'], largest_group) + postenvars = self.envars.copy() - postenvar_dict = {'FHR3': '#fhr#', 'COMPONENT': component} + postenvar_dict = {'FHR_LIST': '#fhr_list#', 'COMPONENT': component} for key, value in postenvar_dict.items(): postenvars.append(rocoto.create_envar(name=key, value=str(value))) @@ -1154,9 +1169,8 @@ def _atmosoceaniceprod(self, component: str): dependencies = rocoto.create_dependency(dep=deps, dep_condition='or') cycledef = 'gdas_half,gdas' if self.run in ['gdas'] else self.run - resources = self.get_resource(component_dict['config']) - task_name = f'{self.run}_{component}_prod_f#fhr#' + task_name = f'{self.run}_{component}_prod_#fhr_label#' task_dict = {'task_name': task_name, 'resources': resources, 'dependency': dependencies, @@ -1168,17 +1182,6 @@ def _atmosoceaniceprod(self, component: str): 'maxtries': '&MAXTRIES;' } - fhrs = self._get_forecast_hours(self.run, self._configs[config], component) - - # ocean/ice components do not have fhr 0 as they are averaged output - if component in ['ocean', 'ice'] and 0 in fhrs: - fhrs.remove(0) - - fhr_var_dict = {'fhr': ' '.join([f"{fhr:03d}" 
for fhr in fhrs])}
-
- if component in ['ocean']:
- fhrs_next = fhrs[1:] + [fhrs[-1] + (fhrs[-1] - fhrs[-2])]
- fhr_var_dict['fhr_next'] = ' '.join([f"{fhr:03d}" for fhr in fhrs_next])

 metatask_dict = {'task_name': f'{self.run}_{component}_prod',
 'task_dict': task_dict,
 'var_dict': fhr_var_dict}
@@ -1189,18 +1192,32 @@ def _atmosoceaniceprod(self, component: str):

 def wavepostsbs(self):
+ wave_grid = self._configs['base']['waveGRD']
+ history_path = self._template_to_rocoto_cycstring(self._base['COM_WAVE_HISTORY_TMPL'])
+ history_file = f'/{self.run}wave.out_grd.{wave_grid}.@Y@m@d.@H@M@S'
+
 deps = []
- dep_dict = {'type': 'metatask', 'name': f'{self.run}_fcst'}
+ dep_dict = {'type': 'data', 'data': [history_path, history_file], 'offset': [None, '#fhr3_next#:00:00']}
 deps.append(rocoto.add_dependency(dep_dict))
- dependencies = rocoto.create_dependency(dep=deps)
+ dep_dict = {'type': 'task', 'name': f'{self.run}_fcst_#seg_dep#'}
+ deps.append(rocoto.add_dependency(dep_dict))
+ dependencies = rocoto.create_dependency(dep=deps, dep_condition='or')
+
+ fhrs = self._get_forecast_hours('gfs', self._configs['wavepostsbs'], 'wave')
+ max_tasks = self._configs['wavepostsbs']['MAX_TASKS']
+ fhr_var_dict = self.get_grouped_fhr_dict(fhrs=fhrs, ngroups=max_tasks)

 wave_post_envars = self.envars.copy()
- postenvar_dict = {'FHR3': '#fhr#'}
+ postenvar_dict = {'FHR_LIST': '#fhr_list#'}
 for key, value in postenvar_dict.items():
 wave_post_envars.append(rocoto.create_envar(name=key, value=str(value)))

 resources = self.get_resource('wavepostsbs')

- task_name = f'{self.run}_wavepostsbs_f#fhr#'
+ # Adjust walltime based on the largest group
+ largest_group = max([len(grp.split(',')) for grp in fhr_var_dict['fhr_list'].split(' ')])
+ resources['walltime'] = Tasks.multiply_HMS(resources['walltime'], largest_group)
+
+ task_name = f'{self.run}_wavepostsbs_#fhr_label#'
 task_dict = {'task_name': task_name,
 'resources': resources,
 'dependency': dependencies,
@@ -1212,12 +1229,9 @@ def wavepostsbs(self):
 'maxtries': '&MAXTRIES;'
 }

- fhrs = self._get_forecast_hours('gfs', self._configs['wavepostsbs'], 'wave')
-
- fhr_metatask_dict = {'fhr': ' '.join([f"{fhr:03d}" for fhr in fhrs])}
 metatask_dict = {'task_name': f'{self.run}_wavepostsbs',
 'task_dict': task_dict,
- 'var_dict': fhr_metatask_dict}
+ 'var_dict': fhr_var_dict}

 task = rocoto.create_task(metatask_dict)
@@ -1512,17 +1526,26 @@ def awips_20km_1p0deg(self):

 def gempak(self):
 deps = []
- dep_dict = {'type': 'task', 'name': f'{self.run}_atmos_prod_f#fhr#'}
+ dep_dict = {'type': 'task', 'name': f'{self.run}_atmos_prod_#fhr_label#'}
 deps.append(rocoto.add_dependency(dep_dict))
 dependencies = rocoto.create_dependency(dep=deps)

+ fhrs = self._get_forecast_hours(self.run, self._configs['gempak'])
+ max_tasks = self._configs['gempak']['MAX_TASKS']
+ fhr_var_dict = self.get_grouped_fhr_dict(fhrs=fhrs, ngroups=max_tasks)
+
+ resources = self.get_resource('gempak')
+ # Adjust walltime based on the largest group
+ largest_group = max([len(grp.split(',')) for grp in fhr_var_dict['fhr_list'].split(' ')])
+ resources['walltime'] = Tasks.multiply_HMS(resources['walltime'], largest_group)
+
 gempak_vars = self.envars.copy()
- gempak_dict = {'FHR3': '#fhr#'}
+ gempak_dict = {'FHR_LIST': '#fhr_list#'}
 for key, value in gempak_dict.items():
 gempak_vars.append(rocoto.create_envar(name=key, value=str(value)))

- resources = self.get_resource('gempak')
- task_name = f'{self.run}_gempak_f#fhr#'
+ task_name = f'{self.run}_gempak_#fhr_label#'
 task_dict = {'task_name': task_name,
 'resources': 
resources,
 'dependency': dependencies,
@@ -1534,9 +1557,6 @@ def gempak(self):
 'maxtries': '&MAXTRIES;'
 }

- fhrs = self._get_forecast_hours(self.run, self._configs['gempak'])
- fhr_var_dict = {'fhr': ' '.join([f"{fhr:03d}" for fhr in fhrs])}
-
 fhr_metatask_dict = {'task_name': f'{self.run}_gempak',
 'task_dict': task_dict,
 'var_dict': fhr_var_dict}
diff --git a/workflow/rocoto/tasks.py b/workflow/rocoto/tasks.py
index d9c769ffbe..3c215414b5 100644
--- a/workflow/rocoto/tasks.py
+++ b/workflow/rocoto/tasks.py
@@ -3,8 +3,9 @@
 import numpy as np
 from applications.applications import AppConfig
 import rocoto.rocoto as rocoto
-from wxflow import Template, TemplateConstants, to_timedelta
-from typing import List
+from wxflow import Template, TemplateConstants, to_timedelta, timedelta_to_HMS
+from typing import List, Union
+from bisect import bisect_right

 __all__ = ['Tasks']
@@ -176,6 +177,127 @@ def _get_forecast_hours(run, config, component='atmos') -> List[str]:
 return fhrs

+ @staticmethod
+ def get_job_groups(fhrs: List[int], ngroups: int, breakpoints: List[int] = None) -> List[dict]:
+ '''
+ Split forecast hours into a number of groups, obeying a list of pre-set breakpoints.
+
+ Takes a list of forecast hours and splits it into a number of groups while obeying
+ a list of pre-set breakpoints and recording which segment each belongs to.
+
+ Parameters
+ ----------
+ fhrs: List[int]
+ List of forecast hours to break into groups
+ ngroups: int
+ Number of groups to split the forecast hours into
+ breakpoints: List[int]
+ List of preset forecast hour break points to use (default: [])
+
+ Returns
+ -------
+ List[dict]: List of dicts, where each dict contains two keys:
+ 'fhrs': the forecast hours for that group
+ 'seg': the forecast segment (from the original breakpoint list)
+ the group belongs to
+ '''
+ if breakpoints is None:
+ breakpoints = []
+
+ num_segs = len(breakpoints) + 1
+ if num_segs > ngroups:
+ raise ValueError(f"Number of segments ({num_segs}) is greater than the number of groups ({ngroups})")
+
+ if ngroups > len(fhrs):
+ ngroups = len(fhrs)
+
+ # First, split at segment boundaries
+ fhrs_segs = [grp.tolist() for grp in np.array_split(fhrs, [bisect_right(fhrs, bpnt) for bpnt in breakpoints if bpnt < max(fhrs)])]
+ seg_lens = [len(seg) for seg in fhrs_segs]
+
+ # Initialize each segment to be split into one job group
+ ngroups_segs = [1 for _ in range(0, len(fhrs_segs))]
+
+ # For remaining job groups, iteratively assign to the segment with the most
+ # hours per group
+ for _ in range(0, ngroups - len(fhrs_segs)):
+ current_lens = [size / weight for size, weight in zip(seg_lens, ngroups_segs)]
+ index_max = max(range(len(current_lens)), key=current_lens.__getitem__)
+ ngroups_segs[index_max] += 1
+
+ # Now that we know how many groups each forecast segment should be split into,
+ # split them and flatten to a single list.
+ groups = []
+ for seg_num, (fhrs_seg, ngroups_seg) in enumerate(zip(fhrs_segs, ngroups_segs)):
+ [groups.append({'fhrs': grp.tolist(), 'seg': seg_num}) for grp in np.array_split(fhrs_seg, ngroups_seg)]
+
+ return groups
+
+ def get_grouped_fhr_dict(self, fhrs: List[int], ngroups: int) -> dict:
+ '''
+ Prepare a metatask dictionary for forecast hour groups.
+
+ Takes a list of forecast hours and splits it into a number of groups while not
+ crossing forecast segment boundaries. Then use that to prepare a dict with key
+ variable lists for use in a rocoto metatask. 
+ + Parameters + ---------- + fhrs: List[int] + List of forecast hours to break into groups + ngroups: int + Number of groups to split the forecast hours into + + Returns + ------- + dict: Several variable lists for use in rocoto metatasks: + fhr_list: list of comma-separated lists of fhr groups + fhr_label: list of labels corresponding to the fhr range + fhr3_last: list of the last fhr in each group, formatted to three digits + fhr3_next: list of the fhr that would follow each group, formatted to + three digits + seg_dep: list of segments each group belongs to + ''' + fhr_breakpoints = self.options['fcst_segments'][1:-1] + group_dicts = Tasks.get_job_groups(fhrs=fhrs, ngroups=ngroups, breakpoints=fhr_breakpoints) + + fhrs_group = [dct['fhrs'] for dct in group_dicts] + fhrs_first = [grp[0] for grp in fhrs_group] + fhrs_last = [grp[-1] for grp in fhrs_group] + fhrs_next = fhrs_first[1:] + [fhrs_last[-1] + (fhrs[-1] - fhrs[-2])] + grp_str = [f'f{grp[0]:03d}-f{grp[-1]:03d}' if len(grp) > 1 else f'f{grp[0]:03d}' for grp in fhrs_group] + seg_deps = [f'seg{dct["seg"]}' for dct in group_dicts] + + fhr_var_dict = {'fhr_list': ' '.join(([','.join(str(fhr) for fhr in grp) for grp in fhrs_group])), + 'fhr_label': ' '.join(grp_str), + 'seg_dep': ' '.join(seg_deps), + 'fhr3_last': ' '.join([f'{fhr:03d}' for fhr in fhrs_last]), + 'fhr3_next': ' '.join([f'{fhr:03d}' for fhr in fhrs_next]) + } + + return fhr_var_dict + + @staticmethod + def multiply_HMS(hms_timedelta: str, multiplier: Union[int, float]) -> str: + ''' + Multiplies an HMS timedelta string + + Parameters + ---------- + hms_timedelta: str + String representing a time delta in HH:MM:SS format + multiplier: int | float + Value to multiply the time delta by + + Returns + ------- + str: String representing a time delta in HH:MM:SS format + + ''' + input_timedelta = to_timedelta(hms_timedelta) + output_timedelta = input_timedelta * multiplier + return timedelta_to_HMS(output_timedelta) + def get_resource(self, task_name): """ Given a task name (task_name) and its configuration (task_names), diff --git a/workflow/tests/__init__.py b/workflow/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/workflow/test_configuration.py b/workflow/tests/test_configuration.py similarity index 100% rename from workflow/test_configuration.py rename to workflow/tests/test_configuration.py diff --git a/workflow/test_hosts.py b/workflow/tests/test_hosts.py similarity index 100% rename from workflow/test_hosts.py rename to workflow/tests/test_hosts.py diff --git a/workflow/tests/test_tasks.py b/workflow/tests/test_tasks.py new file mode 100644 index 0000000000..7e7e7eb5e7 --- /dev/null +++ b/workflow/tests/test_tasks.py @@ -0,0 +1,84 @@ +from rocoto.tasks import Tasks + + +class TestTasks: + + ''' + Tasks class tests + + Note: this is currently only testing a small fraction of the class. 
+ ''' + + def test_job_groups(self): + test_array = list(range(0, 24)) + + # Test simple splitting with no breakpoints + test_groups = [{'fhrs': [0, 1, 2, 3, 4, 5], 'seg': 0}, + {'fhrs': [6, 7, 8, 9, 10, 11], 'seg': 0}, + {'fhrs': [12, 13, 14, 15, 16, 17], 'seg': 0}, + {'fhrs': [18, 19, 20, 21, 22, 23], 'seg': 0}] + assert Tasks.get_job_groups(fhrs=test_array, ngroups=4) == test_groups + + # Test with a break point that aligns with normal split point + test_groups = [{'fhrs': [0, 1, 2, 3, 4, 5], 'seg': 0}, + {'fhrs': [6, 7, 8, 9, 10, 11], 'seg': 0}, + {'fhrs': [12, 13, 14, 15, 16, 17], 'seg': 1}, + {'fhrs': [18, 19, 20, 21, 22, 23], 'seg': 1}] + assert Tasks.get_job_groups(fhrs=test_array, ngroups=4, breakpoints=[11]) == test_groups + + # Test with a break point not at a normal split point + test_groups = [{'fhrs': [0, 1, 2, 3, 4, 5, 6, 7], 'seg': 0}, + {'fhrs': [8, 9, 10, 11, 12, 13, 14], 'seg': 0}, + {'fhrs': [15, 16, 17, 18, 19], 'seg': 1}, + {'fhrs': [20, 21, 22, 23], 'seg': 1}] + assert Tasks.get_job_groups(fhrs=test_array, ngroups=4, breakpoints=[14]) == test_groups + + # Test highly skewed break point + test_groups = [{'fhrs': [0, 1, 2, 3, 4, 5, 6, 7], 'seg': 0}, + {'fhrs': [8, 9, 10, 11, 12, 13, 14, 15], 'seg': 0}, + {'fhrs': [16, 17, 18, 19, 20, 21, 22], 'seg': 0}, + {'fhrs': [23], 'seg': 1}] + assert Tasks.get_job_groups(fhrs=test_array, ngroups=4, breakpoints=[22]) == test_groups + + # Test with two break points that align + test_groups = [{'fhrs': [0, 1, 2, 3, 4, 5], 'seg': 0}, + {'fhrs': [6, 7, 8, 9, 10, 11], 'seg': 0}, + {'fhrs': [12, 13, 14, 15, 16, 17], 'seg': 1}, + {'fhrs': [18, 19, 20, 21, 22, 23], 'seg': 2}] + assert Tasks.get_job_groups(fhrs=test_array, ngroups=4, breakpoints=[11, 17]) == test_groups + + # Test with two skewed break points + test_groups = [{'fhrs': [0, 1], 'seg': 0}, + {'fhrs': [2, 3, 4, 5, 6, 7], 'seg': 1}, + {'fhrs': [8, 9, 10, 11, 12], 'seg': 1}, + {'fhrs': [13, 14, 15, 16, 17], 'seg': 1}, + {'fhrs': [18, 19, 20, 21, 22], 'seg': 1}, + {'fhrs': [23], 'seg': 2}] + assert Tasks.get_job_groups(fhrs=test_array, ngroups=6, breakpoints=[1, 22]) == test_groups + + # Test slightly irregular break points + test_groups = [{'fhrs': [0, 1, 2, 3], 'seg': 0}, + {'fhrs': [4, 5, 6], 'seg': 0}, + {'fhrs': [7, 8, 9, 10], 'seg': 1}, + {'fhrs': [11, 12, 13, 14], 'seg': 1}, + {'fhrs': [15, 16, 17, 18], 'seg': 1}, + {'fhrs': [19, 20, 21, 22, 23], 'seg': 2}] + assert Tasks.get_job_groups(fhrs=test_array, ngroups=6, breakpoints=[6, 18]) == test_groups + + # Test more groups than fhrs available + test_array = list(range(0, 6)) + test_groups = [{'fhrs': [0], 'seg': 0}, + {'fhrs': [1], 'seg': 0}, + {'fhrs': [2], 'seg': 0}, + {'fhrs': [3], 'seg': 0}, + {'fhrs': [4], 'seg': 0}, + {'fhrs': [5], 'seg': 0}] + assert Tasks.get_job_groups(fhrs=test_array, ngroups=15) == test_groups + + def test_multiply_HMS(self): + assert Tasks.multiply_HMS('00:10:00', 2) == '00:20:00' + assert Tasks.multiply_HMS('00:30:00', 10) == '05:00:00' + assert Tasks.multiply_HMS('01:15:00', 4) == '05:00:00' + assert Tasks.multiply_HMS('00:05:00', 1.5) == '00:07:30' + assert Tasks.multiply_HMS('00:40:00', 2.5) == '01:40:00' + assert Tasks.multiply_HMS('00:10:00', 1) == '00:10:00' From 827cf39f054d7b704d9096abca23164cec219df7 Mon Sep 17 00:00:00 2001 From: Kate Friedman Date: Wed, 15 Jan 2025 13:24:54 -0500 Subject: [PATCH 25/33] Resolve bug with LMOD_TMOD_FIND_FIRST setting affecting build on WCOSS2 (#3229) Temporarily comment out the `LMOD_TMOD_FIND_FIRST=yes` setting in `ush/module-setup.sh`. 
Move it to `ush/load_ufsda_modules.sh` for runtime usage for now. Left a note to
undo these changes when WCOSS2 is using spack-stack.

Also found and corrected a spelling mistake.

Refs #3225
---
 ush/load_ufsda_modules.sh      | 4 ++++
 ush/module-setup.sh            | 2 +-
 workflow/generate_workflows.sh | 2 +-
 3 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/ush/load_ufsda_modules.sh b/ush/load_ufsda_modules.sh
index 8117d3f359..1c15484dd7 100755
--- a/ush/load_ufsda_modules.sh
+++ b/ush/load_ufsda_modules.sh
@@ -35,6 +35,10 @@ module use "${HOMEgfs}/sorc/gdas.cd/modulefiles"
 case "${MACHINE_ID}" in
   ("hera" | "orion" | "hercules" | "wcoss2")
+    #TODO: Remove the LMOD_TMOD_FIND_FIRST line once WCOSS2 uses spack-stack
+    if [[ "${MACHINE_ID}" == "wcoss2" ]]; then
+      export LMOD_TMOD_FIND_FIRST=yes
+    fi
     module load "${MODS}/${MACHINE_ID}"
     ncdump=$( command -v ncdump )
     NETCDF=$( echo "${ncdump}" | cut -d " " -f 3 )
diff --git a/ush/module-setup.sh b/ush/module-setup.sh
index 366286d142..2429963d70 100755
--- a/ush/module-setup.sh
+++ b/ush/module-setup.sh
@@ -52,7 +52,7 @@ elif [[ ${MACHINE_ID} = s4* ]] ; then
 elif [[ ${MACHINE_ID} = wcoss2 ]]; then
     # We are on WCOSS2
     # Ignore default modules of the same version lower in the search path (req'd by spack-stack)
-    export LMOD_TMOD_FIND_FIRST=yes
+    #export LMOD_TMOD_FIND_FIRST=yes #TODO: Uncomment this when using spack-stack
     module reset
 elif [[ ${MACHINE_ID} = cheyenne* ]] ; then
diff --git a/workflow/generate_workflows.sh b/workflow/generate_workflows.sh
index dbd360fda2..152e442dec 100755
--- a/workflow/generate_workflows.sh
+++ b/workflow/generate_workflows.sh
@@ -5,7 +5,7 @@ function _usage() {
    cat << EOF
    This script automates the experiment setup process for the global workflow.
    Options are also available to update submodules, build the workflow (with
-   specific build flags), specicy which YAMLs and YAML directory to run, and
+   specific build flags), specify which YAMLs and YAML directory to run, and
    whether to automatically update your crontab.

    Usage: generate_workflows.sh [OPTIONS] /path/to/RUNTESTS

From 3acea8821f77d75027c7098ad341f07474b415bb Mon Sep 17 00:00:00 2001
From: AndrewEichmann-NOAA <58948505+AndrewEichmann-NOAA@users.noreply.github.com>
Date: Thu, 16 Jan 2025 01:09:36 -0500
Subject: [PATCH 26/33] Add bmat task dependency to marine LETKF task (#3224)

Adds a bmat task dependency to the marine LETKF task, as the latter requires the
ensemble to be staged, which is what the bmat task does.
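For background, the sketch below shows the dependency-wiring pattern these tasks use.
It is illustrative only: the `run` value and the standalone usage are assumptions, and
the real `marineanlletkf` method also requests resources and builds the full task dict.

```python
# Illustrative sketch of the rocoto dependency wiring used in
# workflow/rocoto/gfs_tasks.py; 'run' is a hypothetical RUN value.
import rocoto.rocoto as rocoto

run = "gdas"
deps = []
# Each dependency is declared as a dict and converted to XML by the helper
for upstream in (f"{run}_prepoceanobs", f"{run}_marinebmat"):
    dep_dict = {'type': 'task', 'name': upstream}
    deps.append(rocoto.add_dependency(dep_dict))
# 'and' means the LETKF job waits for both upstream tasks to complete
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
```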
Resolves #3222 --- workflow/rocoto/gfs_tasks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/workflow/rocoto/gfs_tasks.py b/workflow/rocoto/gfs_tasks.py index d709393f95..2094c0b194 100644 --- a/workflow/rocoto/gfs_tasks.py +++ b/workflow/rocoto/gfs_tasks.py @@ -641,10 +641,10 @@ def prepoceanobs(self): def marineanlletkf(self): deps = [] - dep_dict = {'type': 'metatask', 'name': f'enkfgdas_fcst', 'offset': f"-{timedelta_to_HMS(self._base['interval_gdas'])}"} - deps.append(rocoto.add_dependency(dep_dict)) dep_dict = {'type': 'task', 'name': f'{self.run}_prepoceanobs'} deps.append(rocoto.add_dependency(dep_dict)) + dep_dict = {'type': 'task', 'name': f'{self.run}_marinebmat'} + deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('marineanlletkf') From eb10ee8ad852e89c90a133e2116cf60266eae329 Mon Sep 17 00:00:00 2001 From: Eric Sinsky - NOAA <48259628+EricSinsky-NOAA@users.noreply.github.com> Date: Thu, 16 Jan 2025 13:48:23 -0500 Subject: [PATCH 27/33] Add additional GEFS variables needed for AI/ML applications (#3221) This PR covers a few items that were merged to the `feature/gefs_reforecast` branch and should be brought to the `develop` branch: - Add additional GEFS variables needed for AI/ML applications - Add umax, vmax and wndmax as requested by CPC - Add ensemble header information for atmosphere products - Remove duplicate OCNSPPT and EPBL settings With the additional variables, the data size has increased from 7.41GB to 7.96GB for a given member in atmos products for the C48_S2SWA_gefs CI case. Resolves #3205 Refs #3183 --- parm/product/gefs.0p25.fFFF.paramlist.a.txt | 3 ++ parm/product/gefs.0p25.fFFF.paramlist.b.txt | 36 +++++++++++++++++++++ parm/ufs/fv3/diag_table | 3 ++ parm/ufs/fv3/diag_table_replay | 3 ++ scripts/exglobal_atmos_products.sh | 2 +- ush/forecast_predet.sh | 12 +++++++ ush/parsing_namelists_FV3.sh | 18 ----------- 7 files changed, 58 insertions(+), 19 deletions(-) diff --git a/parm/product/gefs.0p25.fFFF.paramlist.a.txt b/parm/product/gefs.0p25.fFFF.paramlist.a.txt index 4bb87c32ff..afe8464789 100644 --- a/parm/product/gefs.0p25.fFFF.paramlist.a.txt +++ b/parm/product/gefs.0p25.fFFF.paramlist.a.txt @@ -38,3 +38,6 @@ :MSLET:mean sea level: :VIS:surface: :HGT:cloud ceiling: +:MAXUW:10 m above ground: +:MAXVW:10 m above ground: +:WIND:10 m above ground: diff --git a/parm/product/gefs.0p25.fFFF.paramlist.b.txt b/parm/product/gefs.0p25.fFFF.paramlist.b.txt index 5c406ce34d..f0ed5b5cb9 100644 --- a/parm/product/gefs.0p25.fFFF.paramlist.b.txt +++ b/parm/product/gefs.0p25.fFFF.paramlist.b.txt @@ -73,10 +73,13 @@ :HGT:100 mb: :HGT:10 mb: :HGT:1 mb: +:HGT:125 mb: :HGT:150 mb: +:HGT:175 mb: :HGT:200 mb: :HGT:20 mb: :HGT:2 mb: +:HGT:225 mb: :HGT:250 mb: :HGT:300 mb: :HGT:30 mb: @@ -94,8 +97,11 @@ :HGT:70 mb: :HGT:7 mb: :HGT:750 mb: +:HGT:775 mb: :HGT:800 mb: +:HGT:825 mb: :HGT:850 mb: +:HGT:875 mb: :HGT:900 mb: :HGT:925 mb: :HGT:950 mb: @@ -238,11 +244,14 @@ :SPFH:1 mb: :SPFH:120-90 mb above ground: :SPFH:150-120 mb above ground: +:SPFH:125 mb: :SPFH:150 mb: +:SPFH:175 mb: :SPFH:180-150 mb above ground: :SPFH:200 mb: :SPFH:20 mb: :SPFH:2 mb: +:SPFH:225 mb: :SPFH:250 mb: :SPFH:2 m above ground: :SPFH:300 mb: @@ -263,9 +272,12 @@ :SPFH:70 mb: :SPFH:7 mb: :SPFH:750 mb: +:SPFH:775 mb: :SPFH:800 mb: +:SPFH:825 mb: :SPFH:80 m above ground: :SPFH:850 mb: +:SPFH:875 mb: :SPFH:900 mb: :SPFH:90-60 mb above ground: :SPFH:925 mb: @@ -286,12 +298,15 @@ :TMP:1 mb: 
:TMP:120-90 mb above ground: :TMP:150-120 mb above ground: +:TMP:125 mb: :TMP:150 mb: +:TMP:175 mb: :TMP:180-150 mb above ground: :TMP:1829 m above mean sea level: :TMP:200 mb: :TMP:20 mb: :TMP:2 mb: +:TMP:225 mb: :TMP:250 mb: :TMP:2743 m above mean sea level: :TMP:300 mb: @@ -321,9 +336,12 @@ :TMP:70 mb: :TMP:7 mb: :TMP:750 mb: +:TMP:775 mb: :TMP:800 mb: :TMP:80 m above ground: +:TMP:825 mb: :TMP:850 mb: +:TMP:875 mb: :TMP:900 mb: :TMP:90-60 mb above ground: :TMP:914 m above mean sea level: @@ -354,14 +372,17 @@ :UGRD:1000 mb: :UGRD:100 m above ground: :UGRD:100 mb: +:UGRD:125 mb: :UGRD:10 mb: :UGRD:1 mb: :UGRD:120-90 mb above ground: :UGRD:150-120 mb above ground: :UGRD:150 mb: +:UGRD:175 mb: :UGRD:180-150 mb above ground: :UGRD:1829 m above mean sea level: :UGRD:200 mb: +:UGRD:225 mb: :UGRD:20 mb: :UGRD:2 mb: :UGRD:250 mb: @@ -393,9 +414,12 @@ :UGRD:70 mb: :UGRD:7 mb: :UGRD:750 mb: +:UGRD:775 mb: :UGRD:800 mb: :UGRD:80 m above ground: +:UGRD:825 mb: :UGRD:850 mb: +:UGRD:875 mb: :UGRD:900 mb: :UGRD:90-60 mb above ground: :UGRD:914 m above mean sea level: @@ -422,14 +446,17 @@ :VGRD:1000 mb: :VGRD:100 m above ground: :VGRD:100 mb: +:VGRD:125 mb: :VGRD:10 mb: :VGRD:1 mb: :VGRD:120-90 mb above ground: :VGRD:150-120 mb above ground: :VGRD:150 mb: +:VGRD:175 mb: :VGRD:180-150 mb above ground: :VGRD:1829 m above mean sea level: :VGRD:200 mb: +:VGRD:225 mb: :VGRD:20 mb: :VGRD:2 mb: :VGRD:250 mb: @@ -461,9 +488,12 @@ :VGRD:70 mb: :VGRD:7 mb: :VGRD:750 mb: +:VGRD:775 mb: :VGRD:800 mb: :VGRD:80 m above ground: +:VGRD:825 mb: :VGRD:850 mb: +:VGRD:875 mb: :VGRD:900 mb: :VGRD:90-60 mb above ground: :VGRD:914 m above mean sea level: @@ -497,8 +527,11 @@ :VVEL:70 mb: :VVEL:1000 mb: :VVEL:100 mb: +:VVEL:125 mb: :VVEL:150 mb: +:VVEL:175 mb: :VVEL:200 mb: +:VVEL:225 mb: :VVEL:250 mb: :VVEL:300 mb: :VVEL:350 mb: @@ -510,8 +543,11 @@ :VVEL:650 mb: :VVEL:700 mb: :VVEL:750 mb: +:VVEL:775 mb: :VVEL:800 mb: +:VVEL:825 mb: :VVEL:850 mb: +:VVEL:875 mb: :VVEL:900 mb: :VVEL:925 mb: :VVEL:950 mb: diff --git a/parm/ufs/fv3/diag_table b/parm/ufs/fv3/diag_table index f44bfd82a4..ba4f9c793d 100644 --- a/parm/ufs/fv3/diag_table +++ b/parm/ufs/fv3/diag_table @@ -178,6 +178,9 @@ "gfs_phys", "u10m", "ugrd10m", "fv3_history2d", "all", .false., "none", 2 "gfs_phys", "v10m", "vgrd10m", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "wind10mmax", "wind10m_max", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "u10mmax", "u10m_max", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "v10mmax", "v10m_max", "fv3_history2d", "all", .false., "none", 2 "gfs_phys", "pahi", "pahi", "fv3_history2d", "all", .false., "none", 2 "gfs_phys", "pah_ave", "pah_ave", "fv3_history2d", "all", .false., "none", 2 "gfs_phys", "ecan_acc", "ecan_acc", "fv3_history2d", "all", .false., "none", 2 diff --git a/parm/ufs/fv3/diag_table_replay b/parm/ufs/fv3/diag_table_replay index 01f2cf9794..e197510b34 100644 --- a/parm/ufs/fv3/diag_table_replay +++ b/parm/ufs/fv3/diag_table_replay @@ -232,6 +232,9 @@ ocean_model, "Heat_PmE", "Heat_PmE", "@[MOM6_OUTPUT_DIR]/ocn_lead1%4yr%2mo "gfs_phys", "u10m", "ugrd10m", "fv3_history2d", "all", .false., "none", 2 "gfs_phys", "v10m", "vgrd10m", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "wind10mmax", "wind10m_max", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "u10mmax", "u10m_max", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "v10mmax", "v10m_max", "fv3_history2d", "all", .false., "none", 2 "gfs_phys", "pahi", "pahi", "fv3_history2d", "all", .false., "none", 2 
"gfs_phys", "pah_ave", "pah_ave", "fv3_history2d", "all", .false., "none", 2 "gfs_phys", "ecan_acc", "ecan_acc", "fv3_history2d", "all", .false., "none", 2 diff --git a/scripts/exglobal_atmos_products.sh b/scripts/exglobal_atmos_products.sh index 51e1a108bb..e963f4b2f5 100755 --- a/scripts/exglobal_atmos_products.sh +++ b/scripts/exglobal_atmos_products.sh @@ -104,7 +104,7 @@ for (( nset=1 ; nset <= downset ; nset++ )); do # grep returns 1 if no match is found, so temporarily turn off exit on non-zero rc set +e # shellcheck disable=SC2312 - ${WGRIB2} -d "${last}" "${tmpfile}" | grep -E -i "ugrd|ustm|uflx|u-gwd|land" + ${WGRIB2} -d "${last}" "${tmpfile}" | grep -E -i "ugrd|ustm|uflx|u-gwd|land|maxuw" rc=$? set_strict if (( rc == 0 )); then # Matched the grep diff --git a/ush/forecast_predet.sh b/ush/forecast_predet.sh index 2b730fa7d6..fbe26595a9 100755 --- a/ush/forecast_predet.sh +++ b/ush/forecast_predet.sh @@ -574,6 +574,18 @@ FV3_predet(){ ${NCP} "${PARMgfs}/post/sfs/postxconfig-NT-sfs.txt" "${DATA}/postxconfig-NT.txt" ${NCP} "${PARMgfs}/post/sfs/postxconfig-NT-sfs.txt" "${DATA}/postxconfig-NT_FH00.txt" fi + + # For gefs run, provide ensemble header information + if [[ "${RUN}" == "gefs" ]]; then + if [[ "${ENSMEM}" == "000" ]]; then + export e1=1 + else + export e1=3 + fi + export e2="${ENSMEM:1:2}" + export e3="${NMEM_ENS}" + fi + fi } diff --git a/ush/parsing_namelists_FV3.sh b/ush/parsing_namelists_FV3.sh index bb6a204cc8..2628b03594 100755 --- a/ush/parsing_namelists_FV3.sh +++ b/ush/parsing_namelists_FV3.sh @@ -711,24 +711,6 @@ EOF EOF fi - if [[ "${DO_OCN_SPPT:-NO}" == "YES" ]]; then - cat >> input.nml <> input.nml <> input.nml << EOF / EOF From 3a5af9a32e6c625baebc347585753df3d95519ca Mon Sep 17 00:00:00 2001 From: AntonMFernando-NOAA <167725623+AntonMFernando-NOAA@users.noreply.github.com> Date: Thu, 16 Jan 2025 16:34:49 -0500 Subject: [PATCH 28/33] Adding a gefs_arch task to GEFS workflow (#3211) The `gefs_arch` task is not generated during the execution of the workflow setup scripts. It is only created when `do_extractvars` is set to true, with `do_extractvars` being false by default. This pull request ensures that the `gefs_arch` task will be created in the workflow regardless of the value of `do_extractvars`. Resolves #3151 --- parm/archive/gefs_extracted_atmos.yaml.j2 | 86 ----------------------- parm/archive/gefs_extracted_ice.yaml.j2 | 33 --------- parm/archive/gefs_extracted_ocean.yaml.j2 | 40 ----------- parm/archive/gefs_extracted_wave.yaml.j2 | 51 -------------- parm/archive/master_gefs.yaml.j2 | 7 -- workflow/applications/gefs.py | 4 +- workflow/rocoto/gefs_tasks.py | 32 ++------- 7 files changed, 6 insertions(+), 247 deletions(-) delete mode 100644 parm/archive/gefs_extracted_atmos.yaml.j2 delete mode 100644 parm/archive/gefs_extracted_ice.yaml.j2 delete mode 100644 parm/archive/gefs_extracted_ocean.yaml.j2 delete mode 100644 parm/archive/gefs_extracted_wave.yaml.j2 diff --git a/parm/archive/gefs_extracted_atmos.yaml.j2 b/parm/archive/gefs_extracted_atmos.yaml.j2 deleted file mode 100644 index 7ceba551bf..0000000000 --- a/parm/archive/gefs_extracted_atmos.yaml.j2 +++ /dev/null @@ -1,86 +0,0 @@ -{% set cycle_HH = current_cycle | strftime("%H") %} -{% set cycle_YMD = current_cycle | to_YMD %} -{% set cycle_YMDH = current_cycle | to_YMDH %} -{% set head = RUN + ".t" + cycle_HH + "z." 
%} - -gefs_atmos: - name: "GEFS_ATMOS" - target: "{{ ATARDIR }}/{{ cycle_YMDH }}/gefs_atmos.tar" - required: -#select mem%03d and ensstat files required -{% set members = ["ensstat"] %} -{% for mem_nm in range(0, NMEM_ENS + 1) %} - {% do members.append("mem" ~ '%03d' % mem_nm ) %} -{% endfor %} - -{% if REPLAY_ICS %} - {% set ofst_hr = OFFSET_START_HOUR %} -{% else %} - {% set ofst_hr = FHMIN_GFS %} -{% endif %} - -{% for mem in members %} - {% for res in ['0p25', '0p50', '1p00'] %} - {% set tmpl_dict = ({ '${ROTDIR}':ROTDIR, - '${RUN}':RUN, - '${YMD}':cycle_YMD, - '${HH}':cycle_HH, - '${GRID}':res, - '${MEMDIR}':mem }) %} - - {% set COMIN_ATMOS_GRIB = COM_ATMOS_GRIB_GRID_TMPL | replace_tmpl(tmpl_dict) %} - -# Select pgrb and grib files to copy to the atardir - {% if path_exists(COMIN_ATMOS_GRIB) %} - {% if FHMAX_HF_GFS == 0 %} - {% for fhr in range(ofst_hr, FHMAX_GFS + FHOUT_GFS, FHOUT_GFS) %} - {% if mem=="ensstat" %} - {% set file_name = head ~ "mean.pres_." ~ res ~ ".f" ~ '%03d'|format(fhr) ~ ".grib2" %} - {% set file_path = COMIN_ATMOS_GRIB ~ "/" ~ file_name %} - - "{{ file_path | relpath(ROTDIR)}}" - {% else %} - {% set file_name = head ~ "pgrb2." ~ res ~ ".f" ~ '%03d'|format(fhr) %} - {% set file_path = COMIN_ATMOS_GRIB ~ "/" ~ file_name %} - - "{{ file_path | relpath(ROTDIR)}}" - {% set file_name = head ~ "pgrb2b." ~ res ~ ".f" ~ '%03d'|format(fhr) %} - {% set file_path = COMIN_ATMOS_GRIB ~ "/" ~ file_name %} - - "{{ file_path | relpath(ROTDIR)}}" - {% endif %} - {% endfor %} - {% else %} - {% if res == "0p25" %} - {% for fhr in range(ofst_hr, FHMAX_HF_GFS + FHOUT_HF_GFS, FHOUT_HF_GFS) %} - {% if mem=="ensstat" %} - {% set file_name = head ~ "mean.pres_." ~ res ~ ".f" ~ '%03d'|format(fhr) ~ ".grib2" %} - {% set file_path = COMIN_ATMOS_GRIB ~ "/" ~ file_name %} - - "{{ file_path | relpath(ROTDIR)}}" - {% else %} - {% set file_name = head ~ "pgrb2." ~ res ~ ".f" ~ '%03d'|format(fhr) %} - {% set file_path = COMIN_ATMOS_GRIB ~ "/" ~ file_name %} - - "{{ file_path | relpath(ROTDIR)}}" - {% set file_name = head ~ "pgrb2b." ~ res ~ ".f" ~ '%03d'|format(fhr) %} - {% set file_path = COMIN_ATMOS_GRIB ~ "/" ~ file_name %} - - "{{ file_path | relpath(ROTDIR)}}" - {% endif %} - {% endfor %} - {% endif %} - {% if res == "0p50" %} - {% for fhr in range(FHMAX_HF_GFS + FHOUT_GFS, FHMAX_GFS + FHOUT_GFS, FHOUT_GFS) %} - {% if mem=="ensstat" %} - {% set file_name = head ~ "mean.pres_." ~ res ~ ".f" ~ '%03d'|format(fhr) ~ ".grib2" %} - {% set file_path = COMIN_ATMOS_GRIB ~ "/" ~ file_name %} - - "{{ file_path | relpath(ROTDIR)}}" - {% else %} - {% set file_name = head ~ "pgrb2." ~ res ~ ".f" ~ '%03d'|format(fhr) %} - {% set file_path = COMIN_ATMOS_GRIB ~ "/" ~ file_name %} - - "{{ file_path | relpath(ROTDIR)}}" - {% set file_name = head ~ "pgrb2b." ~ res ~ ".f" ~ '%03d'|format(fhr) %} - {% set file_path = COMIN_ATMOS_GRIB ~ "/" ~ file_name %} - - "{{ file_path | relpath(ROTDIR)}}" - {% endif %} - {% endfor %} - {% endif %} - {% endif %} - {% endif %} - {% endfor %} -{% endfor %} diff --git a/parm/archive/gefs_extracted_ice.yaml.j2 b/parm/archive/gefs_extracted_ice.yaml.j2 deleted file mode 100644 index 786d502f23..0000000000 --- a/parm/archive/gefs_extracted_ice.yaml.j2 +++ /dev/null @@ -1,33 +0,0 @@ -{% set cycle_HH = current_cycle | strftime("%H") %} -{% set cycle_YMD = current_cycle | to_YMD %} -{% set cycle_YMDH = current_cycle | to_YMDH %} -{% set head = RUN + ".ice.t" + cycle_HH + "z." 
%} - -gefs_ice: - name: "GEFS_ICE" - target: "{{ ATARDIR }}/{{ cycle_YMDH }}/gefs_ice.tar" - required: -#select mem%03d and ensstat files required -{% set members = [] %} -{% for mem_nm in range(0, NMEM_ENS + 1) %} - {% do members.append("mem" ~ '%03d' % mem_nm ) %} -{% endfor %} - -{% for mem in members %} - {% set tmpl_dict = ({ '${ROTDIR}':ROTDIR, - '${RUN}':RUN, - '${YMD}':cycle_YMD, - '${HH}':cycle_HH, - '${MEMDIR}':mem }) %} - - {% set COMIN_ICE_HISTORY = COM_ICE_HISTORY_TMPL | replace_tmpl(tmpl_dict) %} - -# Select netcdf files to copy to the atardir - {% if path_exists(COMIN_ICE_HISTORY) %} - {% for fhr in range(FHMIN_GFS + FHOUT_ICE_GFS, FHMAX_GFS + FHOUT_ICE_GFS, FHOUT_ICE_GFS) %} - {% set file_name = head ~ FHOUT_ICE_GFS ~ "hr_avg" ~ ".f" ~ '%03d'|format(fhr) ~ ".nc" %} - {% set file_path = COMIN_ICE_HISTORY ~ "/" ~ file_name %} - - "{{ file_path | relpath(ROTDIR)}}" - {% endfor %} - {% endif %} -{% endfor %} diff --git a/parm/archive/gefs_extracted_ocean.yaml.j2 b/parm/archive/gefs_extracted_ocean.yaml.j2 deleted file mode 100644 index e5e3b36e3b..0000000000 --- a/parm/archive/gefs_extracted_ocean.yaml.j2 +++ /dev/null @@ -1,40 +0,0 @@ -{% set cycle_HH = current_cycle | strftime("%H") %} -{% set cycle_YMD = current_cycle | to_YMD %} -{% set cycle_YMDH = current_cycle | to_YMDH %} -{% set head = RUN + ".ocean.t" + cycle_HH + "z." %} - -gefs_ocean: - name: "GEFS_OCEAN" - target: "{{ ATARDIR }}/{{ cycle_YMDH }}/gefs_ocean.tar" - required: -#select mem%03d and ensstat files required -{% set members = [] %} -{% for mem_nm in range(0, NMEM_ENS + 1) %} - {% do members.append("mem" ~ '%03d' % mem_nm ) %} -{% endfor %} - -{% if OCNRES == "025" %} - {% set res = "1p00" %} -{% else %} - {% set res = (OCNRES|string())[0] ~ "p" ~ (OCNRES|string())[-2:] %} -{% endif %} - -{% for mem in members %} - {% set tmpl_dict = ({ '${ROTDIR}':ROTDIR, - '${RUN}':RUN, - '${YMD}':cycle_YMD, - '${HH}':cycle_HH, - '${MEMDIR}':mem }) %} - - {% set COMIN_OCEAN_NETCDF = COM_OCEAN_NETCDF_TMPL | replace_tmpl(tmpl_dict) %} - - # Select netcdf files to copy to the atardir - {% set netcdf_grid_dir = COMIN_OCEAN_NETCDF ~ "/" ~ res %} - {% if path_exists(netcdf_grid_dir) %} - {% for fhr in range(FHMIN_GFS + FHOUT_OCN_GFS, FHMAX_GFS + FHOUT_OCN_GFS, FHOUT_OCN_GFS) %} - {% set file_name = head ~ res ~ ".f" ~ '%03d'|format(fhr) ~ ".nc" %} - {% set file_path = netcdf_grid_dir ~ "/" ~ file_name %} - - "{{ file_path | relpath(ROTDIR)}}" - {% endfor %} - {% endif %} -{% endfor %} diff --git a/parm/archive/gefs_extracted_wave.yaml.j2 b/parm/archive/gefs_extracted_wave.yaml.j2 deleted file mode 100644 index e0aa07c816..0000000000 --- a/parm/archive/gefs_extracted_wave.yaml.j2 +++ /dev/null @@ -1,51 +0,0 @@ -{% set cycle_HH = current_cycle | strftime("%H") %} -{% set cycle_YMD = current_cycle | to_YMD %} -{% set cycle_YMDH = current_cycle | to_YMDH %} -{% set head = RUN + "wave.t" + cycle_HH + "z." 
%} - -gefs_wave: - name: "GEFS_WAVE" - target: "{{ ATARDIR }}/{{ cycle_YMDH }}/gefs_wave.tar" - required: -{% if REPLAY_ICS %} - {% set ofst_hr = OFFSET_START_HOUR %} -{% else %} - {% set ofst_hr = FHMIN_GFS %} -{% endif %} - -{% set res = (waveGRD[-3:])[0] ~ "p" ~ (waveGRD[-3:])[-2:] %} - -#select mem%03d and ensstat files required -{% set members = [] %} -{% for mem_nm in range(0, NMEM_ENS + 1) %} - {% do members.append("mem" ~ '%03d' % mem_nm ) %} -{% endfor %} - -{% for mem in members %} - {% set tmpl_dict = ({ '${ROTDIR}':ROTDIR, - '${RUN}':RUN, - '${YMD}':cycle_YMD, - '${HH}':cycle_HH, - '${MEMDIR}':mem }) %} - - {% set COMIN_WAVE_GRID = COM_WAVE_GRID_TMPL | replace_tmpl(tmpl_dict) %} - # Select grib2 files to copy to the atardir - {% if path_exists(COMIN_WAVE_GRID) %} - {% for fhr in range(ofst_hr, FHMAX_GFS + FHOUT_WAV, FHOUT_WAV) %} - {% set file_name = head ~ "global." ~ res ~ ".f" ~ '%03d'|format(fhr) ~ ".grib2" %} - {% set file_path = COMIN_WAVE_GRID ~ "/" ~ file_name %} - - "{{ file_path | relpath(ROTDIR)}}" - {% endfor %} - {% endif %} - - {% set COMIN_WAVE_STATION = COM_WAVE_STATION_TMPL | replace_tmpl(tmpl_dict) %} - # Select station files to copy to the atardir - {% if path_exists(COMIN_WAVE_STATION) %} - {% set file_path = COMIN_WAVE_STATION ~ "/" ~ RUN ~ "wave.t" ~ cycle_HH ~ "z.spec_tar.gz" %} - - "{{ file_path | relpath(ROTDIR)}}" - {% set file_path = COMIN_WAVE_STATION ~ "/" ~ RUN ~ "wave.t" ~ cycle_HH ~ "z.cbull_tar" %} - - "{{ file_path | relpath(ROTDIR)}}" - {% set file_path = COMIN_WAVE_STATION ~ "/" ~ RUN ~ "wave.t" ~ cycle_HH ~ "z.bull_tar" %} - - "{{ file_path | relpath(ROTDIR)}}" - {% endif %} -{% endfor %} diff --git a/parm/archive/master_gefs.yaml.j2 b/parm/archive/master_gefs.yaml.j2 index e76d7c9f7a..e33215a23c 100644 --- a/parm/archive/master_gefs.yaml.j2 +++ b/parm/archive/master_gefs.yaml.j2 @@ -4,13 +4,6 @@ {% set cycle_YMDH = current_cycle | to_YMDH %} datasets: -{% filter indent(width=4) %} - {% include "gefs_extracted_atmos.yaml.j2" %} - {% include "gefs_extracted_ocean.yaml.j2" %} - {% include "gefs_extracted_ice.yaml.j2" %} - {% include "gefs_extracted_wave.yaml.j2" %} -{% endfilter %} - # Archive the EXPDIR if requested {% if archive_expdir %} {% filter indent(width=4) %} diff --git a/workflow/applications/gefs.py b/workflow/applications/gefs.py index 33545eb2ec..2e08ddc21d 100644 --- a/workflow/applications/gefs.py +++ b/workflow/applications/gefs.py @@ -91,8 +91,8 @@ def get_task_names(self): tasks += ['wavepostpnt'] if options['do_extractvars']: - tasks += ['extractvars', 'arch'] + tasks += ['extractvars'] - tasks += ['cleanup'] + tasks += ['arch', 'cleanup'] return {f"{self.run}": tasks} diff --git a/workflow/rocoto/gefs_tasks.py b/workflow/rocoto/gefs_tasks.py index f89d3dbbb0..f1b1cd1ea2 100644 --- a/workflow/rocoto/gefs_tasks.py +++ b/workflow/rocoto/gefs_tasks.py @@ -569,7 +569,7 @@ def arch(self): dependencies = rocoto.create_dependency(dep=deps, dep_condition='and') resources = self.get_resource('arch') - task_name = 'arch' + task_name = 'gefs_arch' task_dict = {'task_name': task_name, 'resources': resources, 'envars': self.envars, @@ -587,33 +587,9 @@ def arch(self): def cleanup(self): deps = [] - if self.options['do_extractvars']: - dep_dict = {'type': 'task', 'name': 'arch'} - deps.append(rocoto.add_dependency(dep_dict)) - dependencies = rocoto.create_dependency(dep=deps) - else: - dep_dict = {'type': 'metatask', 'name': 'gefs_atmos_prod'} - deps.append(rocoto.add_dependency(dep_dict)) - dep_dict = {'type': 'metatask', 'name': 
'gefs_atmos_ensstat'} - deps.append(rocoto.add_dependency(dep_dict)) - if self.options['do_ice']: - dep_dict = {'type': 'metatask', 'name': 'gefs_ice_prod'} - deps.append(rocoto.add_dependency(dep_dict)) - if self.options['do_ocean']: - dep_dict = {'type': 'metatask', 'name': 'gefs_ocean_prod'} - deps.append(rocoto.add_dependency(dep_dict)) - if self.options['do_wave']: - dep_dict = {'type': 'metatask', 'name': 'gefs_wave_post_grid'} - deps.append(rocoto.add_dependency(dep_dict)) - dep_dict = {'type': 'metatask', 'name': 'gefs_wave_post_pnt'} - deps.append(rocoto.add_dependency(dep_dict)) - if self.options['do_wave_bnd']: - dep_dict = {'type': 'metatask', 'name': 'gefs_wave_post_bndpnt'} - deps.append(rocoto.add_dependency(dep_dict)) - dep_dict = {'type': 'metatask', 'name': 'gefs_wave_post_bndpnt_bull'} - deps.append(rocoto.add_dependency(dep_dict)) - dependencies = rocoto.create_dependency(dep=deps, dep_condition='and') - + dep_dict = {'type': 'task', 'name': 'gefs_arch'} + deps.append(rocoto.add_dependency(dep_dict)) + dependencies = rocoto.create_dependency(dep=deps) resources = self.get_resource('cleanup') task_name = 'gefs_cleanup' task_dict = {'task_name': task_name, From 4c6c6a41992f44023e1826ff551452295737d80d Mon Sep 17 00:00:00 2001 From: RussTreadon-NOAA <26926959+RussTreadon-NOAA@users.noreply.github.com> Date: Fri, 17 Jan 2025 01:49:35 -0500 Subject: [PATCH 29/33] Move WCOSS2 LD_LIBRARY_PATH patches to load_ufsda_modules.sh (#3236) This PR moves the `LD_LIBRARY_PATH` patches currently needed to run GDASApp on WCOSS2 from `WCOSS2.env` to `load_ufsda_modules.sh`. With this change, the `LD_LIBRARY_PATH` patch is only applied to WCOSS2 GDASApp jobs. This change was suggested by @aerorahul Resolves #3232 --- env/WCOSS2.env | 5 ----- ush/load_ufsda_modules.sh | 4 ++++ 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/env/WCOSS2.env b/env/WCOSS2.env index 6c476cda5d..fe6acf88fb 100755 --- a/env/WCOSS2.env +++ b/env/WCOSS2.env @@ -13,11 +13,6 @@ step=$1 export launcher="mpiexec -l" export mpmd_opt="--cpu-bind verbose,core cfp" -# TODO: Add path to GDASApp libraries and cray-mpich as temporary patches -# TODO: Remove LD_LIBRARY_PATH lines as soon as permanent solutions are available -export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${HOMEgfs}/sorc/gdas.cd/build/lib" -export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/opt/cray/pe/mpich/8.1.19/ofi/intel/19.0/lib" - # Calculate common resource variables # Check first if the dependent variables are set if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-}" ]]; then diff --git a/ush/load_ufsda_modules.sh b/ush/load_ufsda_modules.sh index 1c15484dd7..9c7c57c330 100755 --- a/ush/load_ufsda_modules.sh +++ b/ush/load_ufsda_modules.sh @@ -38,6 +38,10 @@ case "${MACHINE_ID}" in #TODO: Remove LMOD_TMOD_FIND_FIRST line when spack-stack on WCOSS2 if [[ "${MACHINE_ID}" == "wcoss2" ]]; then export LMOD_TMOD_FIND_FIRST=yes + # TODO: Add path to GDASApp libraries and cray-mpich as temporary patches + # TODO: Remove LD_LIBRARY_PATH lines as soon as permanent solutions are available + export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${HOMEgfs}/sorc/gdas.cd/build/lib" + export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/opt/cray/pe/mpich/8.1.19/ofi/intel/19.0/lib" fi module load "${MODS}/${MACHINE_ID}" ncdump=$( command -v ncdump ) From 57ce1b0c37b36e6269f20653c122f14dcba61336 Mon Sep 17 00:00:00 2001 From: Jessica Meixner Date: Fri, 17 Jan 2025 02:49:09 -0500 Subject: [PATCH 30/33] Make assorted updates to waves (#3190) This PR adds the 
following:

* converting from inp -> nml (@sbanihash)
* turning on PIO for waves for restarts (@sbanihash)
* enabling cycling for WW3, which required some updates to the wave prep jobs
  plus changes to which restarts are saved
* changed the way CMEPS, MOM6, CICE and WW3 write restarts to be in sync with
  FV3 for IAU, which required moving the ufs-weather-model forward one hash to
  use the new flexible restart feature. (UFS PR
  https://github.com/ufs-community/ufs-weather-model/pull/2419)
* adds uglo_15km, the targeted new wave grid.
* Update to use the new esmf_threading ufs.configure files, which changes how
  the toggle between ESMF threading and traditional threading is set (UFS PR
  https://github.com/ufs-community/ufs-weather-model/pull/2538)

Notes on ufs-weather-model updates:

| Commit date | Commit hash/ PR | Notes for g-w changes | Baseline Changes |
| :------------- | :------------- | :------------- | :------------- |
| Dec 11, 2024 | https://github.com/ufs-community/ufs-weather-model/commit/409bc85b64b2ced642b0024cef2cd9c78ce46fd9 https://github.com/ufs-community/ufs-weather-model/pull/2419 | Enables flexible restart writes - changes included in g-w PR | none |
| Dec 16, 2024 | https://github.com/ufs-community/ufs-weather-model/commit/6ec6b458b6dc09af48658146d3908502b18272cf https://github.com/ufs-community/ufs-weather-model/pull/2528 https://github.com/ufs-community/ufs-weather-model/pull/2469 | n/a | HAFS test changes, no global changes |
| Dec 18, 2024 | https://github.com/ufs-community/ufs-weather-model/commit/e1193704767800bfaece56eb2a4b058bd4d0afbc https://github.com/ufs-community/ufs-weather-model/pull/2448 | Adds Gaea C6 support (changes in other g-w PRs, not here) | none |
| Dec 23, 2024 | https://github.com/ufs-community/ufs-weather-model/commit/295008915d1ad09fb5d4e24624d0c19627273af4 https://github.com/ufs-community/ufs-weather-model/pull/2533 https://github.com/ufs-community/ufs-weather-model/pull/2538 | changes for ESMF vs traditional threading | none |
| Dec 30, 2024 | https://github.com/ufs-community/ufs-weather-model/commit/241dd8e3b9feae29f1925806bdb05816ae49f427 https://github.com/ufs-community/ufs-weather-model/pull/2485 | n/a | changes in conus13km, no global changes |
| Jan 3, 2025 | https://github.com/ufs-community/ufs-weather-model/commit/76471dc6b7bfc3342416d1a3402f360724f7c0fa https://github.com/ufs-community/ufs-weather-model/pull/2530 | n/a | changes in regional tests, no global changes |

Note this PR requires the following:

* update to fix files to add uglo_15km
* staging ICs for the high resolution test case for uglo_15km

Co-authored-by: @sbanihash

Related Issues:
- Fixes #1457
- Fixes #3154
- Fixes #1795
- related to #1776

---------

Co-authored-by: Rahul Mahajan
Co-authored-by: Saeideh Banihashemi
Co-authored-by: David Huber <69919478+DavidHuber-NOAA@users.noreply.github.com>
Co-authored-by: Walter Kolczynski - NOAA
---
 .gitignore                          | 1 +
 docs/source/wave.rst                | 2 +
 parm/archive/gdaswave.yaml.j2       | 2 +-
 parm/config/gefs/config.ufs         | 9 +-
 parm/config/gfs/config.base        | 2 +-
 parm/config/gfs/config.resources   | 4 +-
 parm/config/gfs/config.ufs         | 19 ++-
 parm/config/gfs/config.wave        | 12 +-
 parm/wave/ak_10m_interp.inp.tmpl   | 5 +-
 parm/wave/at_10m_interp.inp.tmpl   | 2 +-
 parm/wave/ep_10m_interp.inp.tmpl   | 2 +-
 parm/wave/glo_15mxt_interp.inp.tmpl | 2 +-
 parm/wave/glo_30m_interp.inp.tmpl  | 2 +-
 parm/wave/wc_10m_interp.inp.tmpl   | 2 +-
 parm/wave/ww3_shel.inp.tmpl        | 42 -----
 sorc/link_workflow.sh              | 8 +-
 sorc/ufs_model.fd                  | 2 +-
 ush/forecast_postdet.sh            | 95 +++++++++--
 ush/forecast_predet.sh |
24 +++ ush/parsing_model_configure_FV3.sh | 1 + ush/parsing_namelists_WW3.sh | 181 +++++---------------- ush/parsing_ufs_configure.sh | 15 +- workflow/applications/gfs_cycled.py | 4 +- workflow/applications/gfs_forecast_only.py | 2 +- workflow/rocoto/gfs_tasks.py | 10 +- 25 files changed, 207 insertions(+), 243 deletions(-) delete mode 100644 parm/wave/ww3_shel.inp.tmpl diff --git a/.gitignore b/.gitignore index 7669dac845..c2d9c9db45 100644 --- a/.gitignore +++ b/.gitignore @@ -81,6 +81,7 @@ parm/ufs/MOM6_data_table.IN parm/ufs/ice_in.IN parm/ufs/ufs.configure.*.IN parm/ufs/post_itag_gfs +parm/ufs/ww3_shel.nml.IN parm/wafs # Ignore sorc and logs folders from externals diff --git a/docs/source/wave.rst b/docs/source/wave.rst index 56aa34ce3b..52a984b6b3 100644 --- a/docs/source/wave.rst +++ b/docs/source/wave.rst @@ -21,6 +21,8 @@ Here are several regional naming conventions: +===========+=======================+ | glo | Global domain | +-----------+-----------------------+ +| uglo | Unstructured global | ++-----------+-----------------------+ | ak | Alaska | +-----------+-----------------------+ | ao or aoc | Arctic Ocean | diff --git a/parm/archive/gdaswave.yaml.j2 b/parm/archive/gdaswave.yaml.j2 index 220770b38d..109c70c181 100644 --- a/parm/archive/gdaswave.yaml.j2 +++ b/parm/archive/gdaswave.yaml.j2 @@ -1,5 +1,5 @@ gdaswave: - {% set head = "gdas.wave.t" + cycle_HH + "z." %} + {% set head = "gdaswave.t" + cycle_HH + "z." %} name: "GDASWAVE" target: "{{ ATARDIR }}/{{ cycle_YMDH }}/gdaswave.tar" required: diff --git a/parm/config/gefs/config.ufs b/parm/config/gefs/config.ufs index 3f931d7c0d..f2ee7b8619 100644 --- a/parm/config/gefs/config.ufs +++ b/parm/config/gefs/config.ufs @@ -512,9 +512,7 @@ fi # Set the name of the UFS (previously nems) configure template to use # Default ufs.configure templates for supported model configurations -if [[ "${USE_ESMF_THREADING:-}" == "YES" ]]; then - tmpl_suffix="_esmf" -fi +# WW3 restart field variable is different for slow vs fast loop. Add WW3_RSTFLDS="ice" for slow loop variables based on coupling scheme. case "${model_list}" in atm) default_template="${PARMgfs}/ufs/ufs.configure.atm${tmpl_suffix:-}.IN" @@ -533,9 +531,11 @@ case "${model_list}" in ;; atm.ocean.ice.wave) default_template="${PARMgfs}/ufs/ufs.configure.s2sw${tmpl_suffix:-}.IN" + WW3_RSTFLDS="ice" ;; atm.ocean.ice.wave.aero) default_template="${PARMgfs}/ufs/ufs.configure.s2swa${tmpl_suffix:-}.IN" + WW3_RSTFLDS="ice" ;; *) echo "FATAL ERROR: Unsupported UFSWM configuration for ${model_list}" @@ -547,6 +547,9 @@ esac export ufs_configure_template=${ufs_configure_template:-${default_template:-"/dev/null"}} unset model_list default_template +#export wave restart variable: +export WW3_RSTFLDS=${WW3_RSTFLDS:-" "} + if [[ ! -r "${ufs_configure_template}" ]]; then echo "FATAL ERROR: ${ufs_configure_template} either doesn't exist or is not readable." exit 17 diff --git a/parm/config/gfs/config.base b/parm/config/gfs/config.base index 4781f97274..fbbe5c782f 100644 --- a/parm/config/gfs/config.base +++ b/parm/config/gfs/config.base @@ -219,7 +219,7 @@ case "${CASE}" in export waveGRD='uglo_100km' ;; "C768" | "C1152") - export waveGRD='uglo_m1g16' + export waveGRD='uglo_15km' ;; *) echo "FATAL ERROR: Unrecognized CASE ${CASE}, ABORT!" 
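A note on the grid switch above: expressed as a lookup table, the CASE-to-waveGRD
selection in `config.base` after this patch reads as follows. This is a hypothetical
Python rendering for illustration only (`config.base` itself is shell, and the C384
entry is inferred from the `uglo_100km` context line in the hunk):

```python
# Hypothetical sketch of the CASE -> waveGRD selection after this patch;
# the C384 entry is an assumption based on the surrounding context lines.
WAVE_GRID_BY_CASE = {
    "C384": "uglo_100km",
    "C768": "uglo_15km",   # previously 'uglo_m1g16'
    "C1152": "uglo_15km",  # previously 'uglo_m1g16'
}

def wave_grid(case: str) -> str:
    """Return the unstructured wave grid paired with an atmosphere CASE."""
    try:
        return WAVE_GRID_BY_CASE[case]
    except KeyError as err:
        raise ValueError(f"Unrecognized CASE {case}, ABORT!") from err
```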
diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources index 0eea92cbde..aac06ff03c 100644 --- a/parm/config/gfs/config.resources +++ b/parm/config/gfs/config.resources @@ -198,8 +198,8 @@ case ${step} in threads_per_task=1 tasks_per_node=$(( max_tasks_per_node / threads_per_task )) NTASKS=${ntasks} - memory_gdas="10GB" - memory_gfs="10GB" + memory_gdas="20GB" + memory_gfs="20GB" ;; # The wavepost*pnt* jobs are I/O heavy and do not scale well to large nodes. diff --git a/parm/config/gfs/config.ufs b/parm/config/gfs/config.ufs index 0a16a75cb2..5f2b6675cf 100644 --- a/parm/config/gfs/config.ufs +++ b/parm/config/gfs/config.ufs @@ -15,7 +15,7 @@ if (( $# <= 1 )); then echo "--fv3 C48|C96|C192|C384|C768|C1152|C3072" echo "--mom6 500|100|025" echo "--cice6 500|100|025" - echo "--ww3 gnh_10m;aoc_9km;gsh_15m|gwes_30m|glo_025|glo_100|glo_200|glo_500|mx025|uglo_100km|uglo_m1g16" + echo "--ww3 gnh_10m;aoc_9km;gsh_15m|gwes_30m|glo_025|glo_100|glo_200|glo_500|mx025|uglo_100km" echo "--gocart" exit 1 @@ -605,12 +605,12 @@ if [[ "${skip_ww3}" == "false" ]]; then "mx025") ntasks_ww3=80 ;; - "uglo_100km") - ntasks_ww3=40 + "uglo_15km") + ntasks_ww3=1000 nthreads_ww3=1 ;; - "uglo_m1g16") - ntasks_ww3=1000 + "uglo_100km") + ntasks_ww3=40 nthreads_ww3=1 ;; *) @@ -630,9 +630,7 @@ fi # Set the name of the UFS (previously nems) configure template to use # Default ufs.configure templates for supported model configurations -if [[ "${USE_ESMF_THREADING:-}" == "YES" ]]; then - tmpl_suffix="_esmf" -fi +# WW3 restart field variable is different for slow vs fast loop. Add WW3_RSTFLDS="ice" for slow loop variables based on coupling scheme. case "${model_list}" in atm) default_template="${PARMgfs}/ufs/ufs.configure.atm${tmpl_suffix:-}.IN" @@ -651,9 +649,11 @@ case "${model_list}" in ;; atm.ocean.ice.wave) default_template="${PARMgfs}/ufs/ufs.configure.s2sw${tmpl_suffix:-}.IN" + WW3_RSTFLDS="ice" ;; atm.ocean.ice.wave.aero) default_template="${PARMgfs}/ufs/ufs.configure.s2swa${tmpl_suffix:-}.IN" + WW3_RSTFLDS="ice" ;; *) echo "FATAL ERROR: Unsupported UFSWM configuration for ${model_list}" @@ -665,6 +665,9 @@ esac export ufs_configure_template=${ufs_configure_template:-${default_template:-"/dev/null"}} unset model_list default_template +# export wave restart variable: +export WW3_RSTFLDS=${WW3_RSTFLDS:-" "} + if [[ ! -r "${ufs_configure_template}" ]]; then echo "FATAL ERROR: ${ufs_configure_template} either doesn't exist or is not readable." exit 17 diff --git a/parm/config/gfs/config.wave b/parm/config/gfs/config.wave index e792f45473..722e1122e4 100644 --- a/parm/config/gfs/config.wave +++ b/parm/config/gfs/config.wave @@ -56,18 +56,18 @@ case "${waveGRD}" in export wavepostGRD='glo_500' export waveuoutpGRD=${waveGRD} ;; + "uglo_15km") + # unstructured 15km grid + export waveinterpGRD='glo_15mxt at_10m ep_10m wc_10m glo_30m' + export wavepostGRD='' + export waveuoutpGRD=${waveGRD} + ;; "uglo_100km") #unstructured 100km grid export waveinterpGRD='glo_200' export wavepostGRD='' export waveuoutpGRD=${waveGRD} ;; - "uglo_m1g16") - #unstructured m1v16 grid - export waveinterpGRD='glo_15mxt' - export wavepostGRD='' - export waveuoutpGRD=${waveGRD} - ;; *) echo "FATAL ERROR: No grid specific wave config values exist for ${waveGRD}. Aborting." 
exit 1 diff --git a/parm/wave/ak_10m_interp.inp.tmpl b/parm/wave/ak_10m_interp.inp.tmpl index 225ab3628d..c397e17e5d 100755 --- a/parm/wave/ak_10m_interp.inp.tmpl +++ b/parm/wave/ak_10m_interp.inp.tmpl @@ -3,10 +3,9 @@ $------------------------------------------------ $ Start Time DT NSteps TIME DT NSTEPS $ Total number of grids - 3 + 2 $ Grid extensions - 'gnh_10m' - 'aoc_9km' + 'uglo_15km' 'ak_10m' $ 0 diff --git a/parm/wave/at_10m_interp.inp.tmpl b/parm/wave/at_10m_interp.inp.tmpl index 6f4c1f7099..903f49504f 100755 --- a/parm/wave/at_10m_interp.inp.tmpl +++ b/parm/wave/at_10m_interp.inp.tmpl @@ -5,7 +5,7 @@ $ Start Time DT NSteps $ Total number of grids 2 $ Grid extensions - 'uglo_m1g16' + 'uglo_15km' 'at_10m' $ 0 diff --git a/parm/wave/ep_10m_interp.inp.tmpl b/parm/wave/ep_10m_interp.inp.tmpl index 23cfd50c2e..292522325c 100755 --- a/parm/wave/ep_10m_interp.inp.tmpl +++ b/parm/wave/ep_10m_interp.inp.tmpl @@ -5,7 +5,7 @@ $ Start Time DT NSteps $ Total number of grids 2 $ Grid extensions - 'uglo_m1g16' + 'uglo_15km' 'ep_10m' $ 0 diff --git a/parm/wave/glo_15mxt_interp.inp.tmpl b/parm/wave/glo_15mxt_interp.inp.tmpl index 19e9dae684..e9ba00ef23 100755 --- a/parm/wave/glo_15mxt_interp.inp.tmpl +++ b/parm/wave/glo_15mxt_interp.inp.tmpl @@ -5,7 +5,7 @@ $ Start Time DT NSteps $ Total number of grids 2 $ Grid extensions - 'uglo_m1g16' + 'uglo_15km' 'glo_15mxt' $ 0 diff --git a/parm/wave/glo_30m_interp.inp.tmpl b/parm/wave/glo_30m_interp.inp.tmpl index c62881202c..611907fb15 100755 --- a/parm/wave/glo_30m_interp.inp.tmpl +++ b/parm/wave/glo_30m_interp.inp.tmpl @@ -5,7 +5,7 @@ $ Start Time DT NSteps $ Total number of grids 2 $ Grid extensions - 'uglo_m1g16' + 'uglo_15km' 'glo_30m' $ 0 diff --git a/parm/wave/wc_10m_interp.inp.tmpl b/parm/wave/wc_10m_interp.inp.tmpl index 8338c91d0c..234a9a1f93 100755 --- a/parm/wave/wc_10m_interp.inp.tmpl +++ b/parm/wave/wc_10m_interp.inp.tmpl @@ -5,7 +5,7 @@ $ Start Time DT NSteps $ Total number of grids 2 $ Grid extensions - 'uglo_m1g16' + 'uglo_15km' 'wc_10m' $ 0 diff --git a/parm/wave/ww3_shel.inp.tmpl b/parm/wave/ww3_shel.inp.tmpl deleted file mode 100644 index 0b9b335e1b..0000000000 --- a/parm/wave/ww3_shel.inp.tmpl +++ /dev/null @@ -1,42 +0,0 @@ -$ -------------------------------------------------------------------- $ -$ WAVEWATCH III shel input file $ -$ -------------------------------------------------------------------- $ -$ Include ice and mud parameters only if IC1/2/3/4 used : - F F Water levels - CURRLINE - WINDLINE - ICELINE - F F Atmospheric momentum - F F Air density - F Assimilation data : Mean parameters - F Assimilation data : 1-D spectra - F Assimilation data : 2-D spectra -$ - RUN_BEG - RUN_END -$ -$ IOSTYP - IOSRV -$ - OUT_BEG DTFLD OUT_END GOFILETYPE - N - OUTPARS -$ - OUT_BEG DTPNT OUT_END POFILETYPE -BUOY_FILE -$ - OUT_BEG 0 OUT_END -$ -$ Keep next two lines formatting as is to allow proper parsing - RST_BEG DTRST RST_END RSTTYPE -RST_2_BEG DT_2_RST RST_2_END -$ - OUT_BEG 0 OUT_END -$ - OUT_BEG 0 OUT_END -$ - 'the_end' 0 -$ - 'STP' -$ -$ End of input file diff --git a/sorc/link_workflow.sh b/sorc/link_workflow.sh index 1988fe60f6..95067ca4b4 100755 --- a/sorc/link_workflow.sh +++ b/sorc/link_workflow.sh @@ -175,19 +175,13 @@ declare -a ufs_templates=("model_configure.IN" "input_global_nest.nml.IN" "MOM6_data_table.IN" "ice_in.IN" "ufs.configure.atm.IN" - "ufs.configure.atm_esmf.IN" "ufs.configure.atmaero.IN" - "ufs.configure.atmaero_esmf.IN" "ufs.configure.s2s.IN" - "ufs.configure.s2s_esmf.IN" "ufs.configure.s2sa.IN" - 
"ufs.configure.s2sa_esmf.IN" "ufs.configure.s2sw.IN" - "ufs.configure.s2sw_esmf.IN" "ufs.configure.s2swa.IN" - "ufs.configure.s2swa_esmf.IN" "ufs.configure.leapfrog_atm_wav.IN" - "ufs.configure.leapfrog_atm_wav_esmf.IN" + "ww3_shel.nml.IN" "post_itag_gfs") for file in "${ufs_templates[@]}"; do [[ -s "${file}" ]] && rm -f "${file}" diff --git a/sorc/ufs_model.fd b/sorc/ufs_model.fd index 63ace62a36..76471dc6b7 160000 --- a/sorc/ufs_model.fd +++ b/sorc/ufs_model.fd @@ -1 +1 @@ -Subproject commit 63ace62a36a263f03b914a92fc5536509e862dbc +Subproject commit 76471dc6b7bfc3342416d1a3402f360724f7c0fa diff --git a/ush/forecast_postdet.sh b/ush/forecast_postdet.sh index 64cb14a3ec..7b9bd0ee48 100755 --- a/ush/forecast_postdet.sh +++ b/ush/forecast_postdet.sh @@ -341,10 +341,9 @@ FV3_out() { # shellcheck disable=SC2034 WW3_postdet() { echo "SUB ${FUNCNAME[0]}: Linking input data for WW3" - local ww3_grid first_ww3_restart_out ww3_restart_file # Copy initial condition files: - local restart_date restart_dir + local restart_date restart_dir seconds if [[ "${RERUN}" == "YES" ]]; then restart_date="${RERUN_DATE}" restart_dir="${DATArestart}/WW3_RESTART" @@ -354,29 +353,64 @@ WW3_postdet() { fi echo "Copying WW3 restarts for 'RUN=${RUN}' at '${restart_date}' from '${restart_dir}'" - ww3_restart_file="${restart_dir}/${restart_date:0:8}.${restart_date:8:2}0000.restart.ww3" - if [[ -s "${ww3_restart_file}" ]]; then - ${NCP} "${ww3_restart_file}" "${DATA}/restart.ww3" \ - || ( echo "FATAL ERROR: Unable to copy WW3 IC, ABORT!"; exit 1 ) - first_ww3_restart_out=$(date --utc -d "${restart_date:0:8} ${restart_date:8:2} + ${restart_interval} hours" +%Y%m%d%H) + + #First check to see if netcdf restart exists: + local ww3_binary_restart_file ww3_netcdf_restart_file + ww3_binary_restart_file="${restart_dir}/${restart_date:0:8}.${restart_date:8:2}0000.restart.ww3" + ww3_netcdf_restart_file="${restart_dir}/${restart_date:0:8}.${restart_date:8:2}0000.restart.ww3.nc" + if [[ -s "${ww3_netcdf_restart_file}" ]]; then + export WW3_restart_from_binary=false + seconds=$(to_seconds "${restart_date:8:2}0000") # convert HHMMSS to seconds + local ww3_restart_dest_file="ufs.cpld.ww3.r.${restart_date:0:4}-${restart_date:4:2}-${restart_date:6:2}-${seconds}.nc" + ${NCP} "${ww3_netcdf_restart_file}" "${DATA}/${ww3_restart_dest_file}" \ + || ( echo "FATAL ERROR: Unable to copy netcdf WW3 IC, ABORT!"; exit 1 ) + elif [[ -s "${ww3_binary_restart_file}" ]]; then + # found binary ww3 restart file + export WW3_restart_from_binary=true + if [[ -f "${DATA}/ufs.cpld.cpl.r.nc" ]]; then + #if this is a cmeps continue then the wave restart name is different + seconds=$(to_seconds "${restart_date:8:2}0000") # convert HHMMSS to seconds + local ww3_restart_dest_file="ufs.cpld.ww3.r.${restart_date:0:4}-${restart_date:4:2}-${restart_date:6:2}-${seconds}" + ${NCP} "${ww3_binary_restart_file}" "${DATA}/${ww3_restart_dest_file}" \ + || ( echo "FATAL ERROR: Unable to copy binary WW3 IC, ABORT!"; exit 1 ) + else + ${NCP} "${ww3_binary_restart_file}" "${DATA}/restart.ww3" \ + || ( echo "FATAL ERROR: Unable to copy binary WW3 IC, ABORT!"; exit 1 ) + fi else if [[ "${RERUN}" == "YES" ]]; then # In the case of a RERUN, the WW3 restart file is required - echo "FATAL ERROR: WW3 restart file '${ww3_restart_file}' not found for RERUN='${RERUN}', ABORT!" + echo "FATAL ERROR: WW3 binary | netcdf restart file '${ww3_binary_restart_file}' | '${ww3_netcdf_restart_file}' not found for RERUN='${RERUN}', ABORT!" 
        exit 1
      else
        echo "WARNING: WW3 binary | netcdf restart file '${ww3_binary_restart_file}' | '${ww3_netcdf_restart_file}' not found for warm_start='${warm_start}', will start from rest!"
        export WW3_restart_from_binary=true
      fi
    fi

+  first_ww3_restart_out=$(date --utc -d "${restart_date:0:8} ${restart_date:8:2} + ${restart_interval} hours" +%Y%m%d%H)
+  if [[ "${DOIAU:-NO}" == "YES" ]]; then
+    first_ww3_restart_out=$(date --utc -d "${first_ww3_restart_out:0:8} ${first_ww3_restart_out:8:2} + ${half_window} hours" +%Y%m%d%H)
+  fi
+
   # Link restart files
   for (( vdate = first_ww3_restart_out; vdate <= forecast_end_cycle;
          vdate = $(date --utc -d "${vdate:0:8} ${vdate:8:2} + ${restart_interval} hours" +%Y%m%d%H) )); do
-    ww3_restart_file="${vdate:0:8}.${vdate:8:2}0000.restart.ww3"
-    ${NLN} "${DATArestart}/WW3_RESTART/${ww3_restart_file}" "${ww3_restart_file}"
+    seconds=$(to_seconds "${vdate:8:2}0000") # convert HHMMSS to seconds
+    ww3_restart_ufs_file="ufs.cpld.ww3.r.${vdate:0:4}-${vdate:4:2}-${vdate:6:2}-${seconds}.nc"
+    ww3_netcdf_restart_file="${vdate:0:8}.${vdate:8:2}0000.restart.ww3.nc"
+    ${NLN} "${DATArestart}/WW3_RESTART/${ww3_netcdf_restart_file}" "${ww3_restart_ufs_file}"
   done

+  # TO DO: link GEFS restart for next cycle IC
+  #if [[ "${RUN}" == "gefs" ]]; then
+  #  vdate=${model_start_date_next_cycle}
+  #  seconds=$(to_seconds "${vdate:8:2}0000") # convert HHMMSS to seconds
+  #  ww3_restart_ufs_file="ufs.cpld.ww3.r.${vdate:0:4}-${vdate:4:2}-${vdate:6:2}-${seconds}.nc"
+  #  ww3_netcdf_restart_file="${vdate:0:8}.${vdate:8:2}0000.restart.ww3.nc"
+  #  ${NLN} "${DATArestart}/WW3_RESTART/${ww3_netcdf_restart_file}" "${ww3_restart_ufs_file}"
+  #fi
+
   # Link output files
   local wavprfx="${RUN}wave${WAV_MEMBER:-}"
   ${NLN} "${COMOUT_WAVE_HISTORY}/${wavprfx}.log.${waveGRD}.${PDY}${cyc}" "log.ww3"
@@ -414,7 +448,42 @@ WW3_nml() {
 WW3_out() {
   echo "SUB ${FUNCNAME[0]}: Copying output data for WW3"
-  # TODO: Need to add logic to copy restarts from DATArestart/WW3_RESTART to COMOUT_WAVE_RESTART
+
+  # Copy wave namelist from DATA to COMOUT_CONF after the forecast is run (and successful)
+  ${NCP} "${DATA}/ww3_shel.nml" "${COMOUT_CONF}/ufs.ww3_shel.nml"
+
+  # Copy WW3 restarts at the end of the forecast segment to COM for RUN=gfs|gefs
+  if [[ "${COPY_FINAL_RESTARTS}" == "YES" ]]; then
+    local restart_file
+    if [[ "${RUN}" == "gfs" || "${RUN}" == "gefs" ]]; then
+      echo "Copying WW3 restarts for 'RUN=${RUN}' at ${forecast_end_cycle}"
+      restart_file="${forecast_end_cycle:0:8}.${forecast_end_cycle:8:2}0000.restart.ww3.nc"
+      ${NCP} "${DATArestart}/WW3_RESTART/${restart_file}" \
+             "${COMOUT_WAVE_RESTART}/${restart_file}"
+    fi
+  fi
+
+  # Copy restarts for next cycle for RUN=gdas|gefs
+  #TO DO: GEFS needs to be added here
+  if [[ "${RUN}" == "gdas" ]]; then
+    local restart_date restart_file
+    restart_date="${model_start_date_next_cycle}"
+    echo "Copying WW3 restarts for 'RUN=${RUN}' at ${restart_date}"
+    restart_file="${restart_date:0:8}.${restart_date:8:2}0000.restart.ww3.nc"
+    ${NCP} "${DATArestart}/WW3_RESTART/${restart_file}" \
+           "${COMOUT_WAVE_RESTART}/${restart_file}"
+  fi
+
+  # Copy restarts for downstream usage in HAFS
+  if [[ "${RUN}" == "gdas" ]]; then
+    local restart_date restart_file
+    restart_date="${next_cycle}"
+    echo "Copying WW3 restarts for 'RUN=${RUN}' at ${restart_date}"
restart_file="${restart_date:0:8}.${restart_date:8:2}0000.restart.ww3.nc" + ${NCP} "${DATArestart}/WW3_RESTART/${restart_file}" \ + "${COMOUT_WAVE_RESTART}/${restart_file}" + fi + } diff --git a/ush/forecast_predet.sh b/ush/forecast_predet.sh index fbe26595a9..e08b84d932 100755 --- a/ush/forecast_predet.sh +++ b/ush/forecast_predet.sh @@ -707,6 +707,7 @@ MOM6_predet(){ } +# shellcheck disable=SC2178 CMEPS_predet(){ echo "SUB ${FUNCNAME[0]}: CMEPS before run type determination" @@ -715,6 +716,29 @@ CMEPS_predet(){ if [[ ! -d "${DATArestart}/CMEPS_RESTART" ]]; then mkdir -p "${DATArestart}/CMEPS_RESTART"; fi ${NLN} "${DATArestart}/CMEPS_RESTART" "${DATA}/CMEPS_RESTART" + # For CMEPS, CICE, MOM6 and WW3 determine restart writes + # Note FV3 has its own restart intervals + cmeps_restart_interval=${restart_interval:-${FHMAX}} + # restart_interval = 0 implies write restart at the END of the forecast i.e. at FHMAX + # Convert restart interval into an explicit list for FV3 + if (( cmeps_restart_interval == 0 )); then + if [[ "${DOIAU:-NO}" == "YES" ]]; then + CMEPS_RESTART_FH=$(( FHMAX + half_window )) + else + CMEPS_RESTART_FH=("${FHMAX}") + fi + else + if [[ "${DOIAU:-NO}" == "YES" ]]; then + local restart_interval_start=$(( cmeps_restart_interval + half_window )) + local restart_interval_end=$(( FHMAX + half_window )) + else + local restart_interval_start=${cmeps_restart_interval} + local restart_interval_end=${FHMAX} + fi + CMEPS_RESTART_FH="$(seq -s ' ' "${restart_interval_start}" "${cmeps_restart_interval}" "${restart_interval_end}")" + fi + export CMEPS_RESTART_FH + # TODO: For GEFS, once cycling waves "self-cycles" and therefore needs to have a restart at 6 hour } # shellcheck disable=SC2034 diff --git a/ush/parsing_model_configure_FV3.sh b/ush/parsing_model_configure_FV3.sh index 8033d7686a..d28048f098 100755 --- a/ush/parsing_model_configure_FV3.sh +++ b/ush/parsing_model_configure_FV3.sh @@ -25,6 +25,7 @@ local SHOUR=${model_start_date:8:2} local FHROT=${IAU_FHROT:-0} local DT_ATMOS=${DELTIM} local RESTART_INTERVAL="${FV3_RESTART_FH[*]}" +local RESTART_FH="${CMEPS_RESTART_FH:-" "}" # QUILTING local QUILTING_RESTART="${QUILTING_RESTART:-${QUILTING}}" local WRITE_GROUP=${WRITE_GROUP:-1} diff --git a/ush/parsing_namelists_WW3.sh b/ush/parsing_namelists_WW3.sh index 5ee4944c18..67bffb1967 100755 --- a/ush/parsing_namelists_WW3.sh +++ b/ush/parsing_namelists_WW3.sh @@ -4,158 +4,61 @@ WW3_namelists(){ # WW3 namelists/input generation - FHMAX_WAV=${FHMAX_WAV:-384} - - # Date and time stuff - - # Beginning time for outpupt may differ from SDATE if DOIAU=YES - export date=$PDY - export YMDH=${PDY}${cyc} - # Roll back $IAU_FHROT hours of DOIAU=YES - if [ "$DOIAU" = "YES" ] - then - WAVHINDH=$(( WAVHINDH + IAU_FHROT )) - fi - # Set time stamps for model start and output - # For special case when IAU is on but this is an initial half cycle - if [ ${IAU_OFFSET:-0} = 0 ]; then - ymdh_beg=$YMDH - else - ymdh_beg=$($NDATE -$WAVHINDH $YMDH) - fi - time_beg="$(echo $ymdh_beg | cut -c1-8) $(echo $ymdh_beg | cut -c9-10)0000" - ymdh_end=$($NDATE $FHMAX_WAV $YMDH) - time_end="$(echo $ymdh_end | cut -c1-8) $(echo $ymdh_end | cut -c9-10)0000" - ymdh_beg_out=$YMDH - time_beg_out="$(echo $ymdh_beg_out | cut -c1-8) $(echo $ymdh_beg_out | cut -c9-10)0000" - - # Restart file times (already has IAU_FHROT in WAVHINDH) - RSTOFFSET=$(( ${WAVHCYC} - ${WAVHINDH} )) - # Update restart time is added offset relative to model start - RSTOFFSET=$(( ${RSTOFFSET} + ${RSTIOFF_WAV} )) - ymdh_rst_ini=$($NDATE ${RSTOFFSET} $YMDH) - 
RST2OFFSET=$(( DT_2_RST_WAV / 3600 )) - ymdh_rst2_ini=$($NDATE ${RST2OFFSET} $YMDH) # DT2 relative to first-first-cycle restart file - # First restart file for cycling - time_rst_ini="$(echo $ymdh_rst_ini | cut -c1-8) $(echo $ymdh_rst_ini | cut -c9-10)0000" - if [ ${DT_1_RST_WAV} = 1 ]; then - time_rst1_end=${time_rst_ini} - else - RST1OFFSET=$(( DT_1_RST_WAV / 3600 )) - ymdh_rst1_end=$($NDATE $RST1OFFSET $ymdh_rst_ini) - time_rst1_end="$(echo $ymdh_rst1_end | cut -c1-8) $(echo $ymdh_rst1_end | cut -c9-10)0000" - fi - # Second restart file for checkpointing - if [ "${RSTTYPE_WAV}" = "T" ]; then - time_rst2_ini="$(echo $ymdh_rst2_ini | cut -c1-8) $(echo $ymdh_rst2_ini | cut -c9-10)0000" - time_rst2_end=$time_end - # Condition for gdas run or any other run when checkpoint stamp is > ymdh_end - if [ $ymdh_rst2_ini -ge $ymdh_end ]; then - ymdh_rst2_ini=$($NDATE 3 $ymdh_end) - time_rst2_ini="$(echo $ymdh_rst2_ini | cut -c1-8) $(echo $ymdh_rst2_ini | cut -c9-10)0000" - time_rst2_end=$time_rst2_ini - fi - else - time_rst2_ini="$" - time_rst2_end= - DT_2_RST_WAV= - fi - - - set +x - echo ' ' - echo 'Times in wave model format :' - echo '----------------------------' - echo " date / cycle : $date $cycle" - echo " starting time : $time_beg" - echo " ending time : $time_end" - echo ' ' - set_trace - - + FHMAX_WAV="${FHMAX_WAV:-384}" # --------------------------------------------------------------------------- # -# Create ww3_shel.inp - - if [ -f "${PARMgfs}/wave/ww3_shel.inp.tmpl" ]; then - cp "${PARMgfs}/wave/ww3_shel.inp.tmpl" "ww3_shel.inp.tmpl" - fi - if [ ! -f ww3_shel.inp.tmpl ]; then - echo "ABNORMAL EXIT: NO TEMPLATE FOR WW3 SHEL INPUT FILE" - exit 12 - fi - # Buoy location file - if [ -f ${PARMgfs}/wave/wave_${NET}.buoys ] + if [ -f "${PARMgfs}/wave/wave_${NET}.buoys" ] then - cp ${PARMgfs}/wave/wave_${NET}.buoys buoy.loc + ${NCP} "${PARMgfs}/wave/wave_${NET}.buoys" "${DATA}/ww3_points.list" fi - if [ -f buoy.loc ] + if [ -f "${DATA}/ww3_points.list" ] then set +x - echo " buoy.loc copied (${PARMgfs}/wave/wave_${NET}.buoys)." + echo "ww3_points.list copied (${PARMgfs}/wave/wave_${NET}.buoys)." 
set_trace else - echo " FATAL ERROR : buoy.loc (${PARMgfs}/wave/wave_${NET}.buoys) NOT FOUND" + echo "FATAL ERROR : ww3_points.list (${PARMgfs}/wave/wave_${NET}.buoys) NOT FOUND" exit 12 fi -# Initialize inp file parameters -ICELINE='F F' -CURRLINE='F F' -WINDLINE='F F' - -case ${WW3ATMINP} in - 'YES' ) - WINDLINE="T F";; - 'CPL' ) - WINDLINE="C F";; -esac - -case ${WW3ICEINP} in - 'YES' ) - ICELINE="T F";; - 'CPL' ) - ICELINE="C F";; -esac - -case ${WW3CURINP} in - 'YES' ) - CURRLINE="T F";; - 'CPL' ) - CURRLINE="C F";; -esac - -sed -e "s/IOSRV/${IOSRV}/g" \ - -e "s/OUTPARS/${OUTPARS_WAV}/g" \ - -e "s/ICELINE/$ICELINE/g" \ - -e "s/CURRLINE/$CURRLINE/g" \ - -e "s/WINDLINE/$WINDLINE/g" \ - -e "s/RUN_BEG/$time_beg/g" \ - -e "s/RUN_END/$time_end/g" \ - -e "s/OUT_BEG/$time_beg_out/g" \ - -e "s/OUT_END/$time_end/g" \ - -e "s/DTFLD/ $DTFLD_WAV/g" \ - -e "s/GOFILETYPE/ $GOFILETYPE/g" \ - -e "s/POFILETYPE/ $POFILETYPE/g" \ - -e "s/DTPNT/ $DTPNT_WAV/g" \ - -e "s/DTPNT/ $DTPNT_WAV/g" \ - -e "/BUOY_FILE/r buoy.loc" \ - -e "s/BUOY_FILE/DUMMY/g" \ - -e "s/RST_BEG/$time_rst_ini/g" \ - -e "s/RSTTYPE/$RSTTYPE_WAV/g" \ - -e "s/RST_2_BEG/$time_rst2_ini/g" \ - -e "s/DTRST/$DT_1_RST_WAV/g" \ - -e "s/DT_2_RST/$DT_2_RST_WAV/g" \ - -e "s/RST_END/$time_rst1_end/g" \ - -e "s/RST_2_END/$time_rst2_end/g" \ - ww3_shel.inp.tmpl | \ -sed -n "/DUMMY/!p" > ww3_shel.inp - -rm -f ww3_shel.inp.tmpl buoy.loc - -cat ww3_shel.inp + #set coupling to ice/current + WW3_ICE="F" + WW3_CUR="F" + + case ${WW3ICEINP} in + 'YES' ) + WW3_ICE="T";; + 'CPL' ) + WW3_ICE="C";; + esac + + case ${WW3CURINP} in + 'YES' ) + WW3_CUR="T";; + 'CPL' ) + WW3_CUR="C";; + esac + + # Variables used in atparse of shel template + export WW3_IC1="F" + export WW3_IC5="F" + export WW3_ICE + export WW3_CUR + export WW3_OUTPARS="${OUTPARS_WAV}" + export WW3_DTFLD="${DTFLD_WAV}" + export WW3_DTPNT="${DTPNT_WAV}" + # Ensure the template exists + local template=${WW3_INPUT_TEMPLATE:-"${PARMgfs}/ufs/ww3_shel.nml.IN"} + if [[ ! -f "${template}" ]]; then + echo "FATAL ERROR: template '${template}' does not exist, ABORT!" + exit 1 + fi + rm -f "${DATA}/ww3_shel.nml" + atparse < "${template}" >> "${DATA}/ww3_shel.nml" + echo "Rendered ww3_shel.nml:" + cat "${DATA}/ww3_shel.nml" } diff --git a/ush/parsing_ufs_configure.sh b/ush/parsing_ufs_configure.sh index 7ee699ef0a..f5e5857830 100755 --- a/ush/parsing_ufs_configure.sh +++ b/ush/parsing_ufs_configure.sh @@ -53,12 +53,14 @@ if [[ "${cplflx}" = ".true." ]]; then local CMEPS_RESTART_DIR="CMEPS_RESTART/" local CPLMODE="${cplmode}" local coupling_interval_fast_sec="${CPL_FAST}" - local RESTART_N="${restart_interval}" + local RESTART_N=999999 local ocean_albedo_limit=0.06 local ATMTILESIZE="${CASE:1}" local ocean_albedo_limit=0.06 local pio_rearranger=${pio_rearranger:-"box"} local MED_history_n=1000000 + + local histaux_enabled=".false." fi if [[ "${cplice}" = ".true." ]]; then @@ -74,12 +76,10 @@ if [[ "${cplwav}" = ".true." ]]; then local wav_model="ww3" local wav_petlist_bounds="$(( ATMPETS+OCNPETS+ICEPETS )) $(( ATMPETS+OCNPETS+ICEPETS+WAVPETS-1 ))" local wav_omp_num_threads="${WAVTHREADS}" - local WW3_user_sets_restname="false" local WW3_user_histname="false" local WW3_historync="false" - local WW3_restartnc="false" - local WW3_restart_from_binary="false" + local WW3_restartnc="true" local WW3_PIO_FORMAT="pnetcdf" local WW3_PIO_IOTASKS=-99 local WW3_PIO_STRIDE=4 @@ -97,6 +97,13 @@ if [[ "${cplchm}" = ".true." 
]]; then fi +#Set ESMF_THREADING variable for ufs configure +if [[ "${USE_ESMF_THREADING}" = "YES" ]]; then + local ESMF_THREADING="true" +else + local ESMF_THREADING="false" +fi + # Ensure the template exists if [[ ! -r "${ufs_configure_template}" ]]; then echo "FATAL ERROR: template '${ufs_configure_template}' does not exist, ABORT!" diff --git a/workflow/applications/gfs_cycled.py b/workflow/applications/gfs_cycled.py index 45a7bccc7a..7a2c812a0d 100644 --- a/workflow/applications/gfs_cycled.py +++ b/workflow/applications/gfs_cycled.py @@ -122,7 +122,7 @@ def _get_app_configs(self, run): configs += ['awips', 'fbwind'] if options['do_wave']: - configs += ['waveinit', 'waveprep', 'wavepostsbs', 'wavepostpnt'] + configs += ['waveinit', 'wavepostsbs', 'wavepostpnt'] if options['do_wave_bnd']: configs += ['wavepostbndpnt', 'wavepostbndpntbll'] if options['do_gempak']: @@ -187,7 +187,7 @@ def get_task_names(self): if options['do_jedisnowda']: task_names[run] += ['snowanl'] - wave_prep_tasks = ['waveinit', 'waveprep'] + wave_prep_tasks = ['waveinit'] wave_bndpnt_tasks = ['wavepostbndpnt', 'wavepostbndpntbll'] wave_post_tasks = ['wavepostsbs', 'wavepostpnt'] diff --git a/workflow/applications/gfs_forecast_only.py b/workflow/applications/gfs_forecast_only.py index 5b397c105b..de1c8cef27 100644 --- a/workflow/applications/gfs_forecast_only.py +++ b/workflow/applications/gfs_forecast_only.py @@ -67,7 +67,7 @@ def _get_app_configs(self, run): configs += ['oceanice_products'] if options['do_wave']: - configs += ['waveinit', 'waveprep', 'wavepostsbs', 'wavepostpnt'] + configs += ['waveinit', 'wavepostsbs', 'wavepostpnt'] if options['do_wave_bnd']: configs += ['wavepostbndpnt', 'wavepostbndpntbll'] if options['do_gempak']: diff --git a/workflow/rocoto/gfs_tasks.py b/workflow/rocoto/gfs_tasks.py index 2094c0b194..768512ba39 100644 --- a/workflow/rocoto/gfs_tasks.py +++ b/workflow/rocoto/gfs_tasks.py @@ -931,6 +931,11 @@ def _fcst_cycled(self): dep = rocoto.add_dependency(dep_dict) dependencies = rocoto.create_dependency(dep=dep) + if self.options['do_wave']: + wave_job = 'waveprep' if self.options['app'] in ['ATMW'] else 'waveinit' + dep_dict = {'type': 'task', 'name': f'{self.run}_{wave_job}'} + dependencies.append(rocoto.add_dependency(dep_dict)) + if self.options['do_jediocnvar']: dep_dict = {'type': 'task', 'name': f'{self.run}_marineanlfinal'} dependencies.append(rocoto.add_dependency(dep_dict)) @@ -950,11 +955,6 @@ def _fcst_cycled(self): dependencies.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep_condition='or', dep=dependencies) - if self.options['do_wave']: - dep_dict = {'type': 'task', 'name': f'{self.run}_waveprep'} - dependencies.append(rocoto.add_dependency(dep_dict)) - dependencies = rocoto.create_dependency(dep_condition='and', dep=dependencies) - cycledef = 'gdas_half,gdas' if self.run in ['gdas'] else self.run if self.run in ['gfs']: From 01eeb243f7b4242bc46e1794c4afd2a64d2afa7f Mon Sep 17 00:00:00 2001 From: TerrenceMcGuinness-NOAA Date: Fri, 17 Jan 2025 02:50:04 -0500 Subject: [PATCH 31/33] CI JJOB Tests using CMake (#3214) Adding CI tests at the JJOB level using CMake/ctest wrappers: These individual JJOB tests have four distinct phases: - **Setup:** Creates a EXPDIR/COMROOT just for the individual JJOB - **Stage:** Moves the specific files into the COMROOT that are needed to run the JJOB specified in `${HOMEgfs}/ci/ctest/cases/{CASE}_{JJOB}.yaml` - **Execute:** Run the JJOB in batch (batch card extracted from XML via Rocoto) - **Validate:** 
Check the outputs also specified in the above yaml configure file (currently stubbed) Resolves #3204 --------- Co-authored-by: Terry McGuinness Co-authored-by: Rahul Mahajan --- CMakeLists.txt | 27 ++++++ ci/platforms/config.hera | 2 + ci/platforms/config.orion | 1 + ctests/CMakeLists.txt | 106 ++++++++++++++++++++++++ ctests/README.md | 58 +++++++++++++ ctests/cases/C48_ATM_gfs_fcst_seg0.yaml | 17 ++++ ctests/scripts/execute.sh.in | 66 +++++++++++++++ ctests/scripts/setup.sh.in | 31 +++++++ ctests/scripts/stage.py | 67 +++++++++++++++ ctests/scripts/stage.sh.in | 36 ++++++++ ctests/scripts/validate.sh.in | 9 ++ modulefiles/module_gwsetup.hercules.lua | 1 + modulefiles/module_gwsetup.orion.lua | 2 + 13 files changed, 423 insertions(+) create mode 100644 CMakeLists.txt create mode 100644 ctests/CMakeLists.txt create mode 100644 ctests/README.md create mode 100644 ctests/cases/C48_ATM_gfs_fcst_seg0.yaml create mode 100755 ctests/scripts/execute.sh.in create mode 100755 ctests/scripts/setup.sh.in create mode 100755 ctests/scripts/stage.py create mode 100755 ctests/scripts/stage.sh.in create mode 100755 ctests/scripts/validate.sh.in diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000000..5044689f7e --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,27 @@ +# ------------------------------------------------------------------------- # +# Global Workflow +# ------------------------------------------------------------------------- # + +# Check for minimum cmake requirement +cmake_minimum_required( VERSION 3.20 FATAL_ERROR ) + +project(global_workflow VERSION 1.0.0) + +include(GNUInstallDirs) +enable_testing() + +# Build type. +if(NOT CMAKE_BUILD_TYPE MATCHES "^(Debug|Release|RelWithDebInfo|MinSizeRel)$") + message(STATUS "Setting build type to 'Release' as none was specified.") + set(CMAKE_BUILD_TYPE + "Release" + CACHE STRING "Choose the type of build." FORCE) + set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" + "MinSizeRel" "RelWithDebInfo") +endif() + +# Build global-workflow source codes +# add_subdirectory(sorc) + +# Setup tests +add_subdirectory(ctests) diff --git a/ci/platforms/config.hera b/ci/platforms/config.hera index 6d3e43c820..09e2e28ddb 100644 --- a/ci/platforms/config.hera +++ b/ci/platforms/config.hera @@ -2,6 +2,8 @@ export GFS_CI_ROOT=/scratch1/NCEPDEV/global/Terry.McGuinness/GFS_CI_ROOT export ICSDIR_ROOT=/scratch1/NCEPDEV/global/glopara/data/ICSDIR + +export STAGED_TESTS_DIR=${GFS_CI_ROOT}/STAGED_TESTS_DIR export HPC_ACCOUNT=nems export max_concurrent_cases=5 export max_concurrent_pr=4 diff --git a/ci/platforms/config.orion b/ci/platforms/config.orion index 5171373127..507068d4e7 100644 --- a/ci/platforms/config.orion +++ b/ci/platforms/config.orion @@ -2,6 +2,7 @@ export GFS_CI_ROOT=/work2/noaa/stmp/GFS_CI_ROOT/ORION export ICSDIR_ROOT=/work/noaa/global/glopara/data/ICSDIR +export STAGED_TESTS_DIR=${GFS_CI_ROOT}/STAGED_TESTS_DIR export HPC_ACCOUNT=nems export max_concurrent_cases=5 export max_concurrent_pr=4 diff --git a/ctests/CMakeLists.txt b/ctests/CMakeLists.txt new file mode 100644 index 0000000000..f8d928f456 --- /dev/null +++ b/ctests/CMakeLists.txt @@ -0,0 +1,106 @@ +# ------------------------------------------------------------------------- # +# CTests for Global Workflow +# ------------------------------------------------------------------------- # +# These ctests correspond to JJOBs (individual Rocoto jobs) that can be +# run independently, each requiring its own YAML definition of inputs +# and configurations. 
By integrating with Rocoto, these jobs can be +# validated, staged, and executed as self-contained tests using +# their own data and test parameters. +# ------------------------------------------------------------------------- # + +# Function to set a variable from an environment variable or default value +function(set_from_env_or_default VAR_NAME ENV_VAR DEFAULT_VALUE) + if (DEFINED ENV{${ENV_VAR}} AND NOT DEFINED ${VAR_NAME}) + set(${VAR_NAME} $ENV{${ENV_VAR}} CACHE STRING "Set from environment variable ${ENV_VAR}") + elseif(NOT DEFINED ${VAR_NAME} AND NOT ${DEFAULT_VALUE} STREQUAL "") + set(${VAR_NAME} ${DEFAULT_VALUE} CACHE STRING "Default value for ${VAR_NAME}") + endif() +endfunction() + +# Set HOMEgfs +if (NOT DEFINED HOMEgfs) + set(HOMEgfs ${PROJECT_SOURCE_DIR}) +endif() + +# Set RUNTESTS +set_from_env_or_default(RUNTESTS RUNTESTS "${CMAKE_CURRENT_BINARY_DIR}/RUNTESTS") + +# Set HPC_ACCOUNT +set_from_env_or_default(HPC_ACCOUNT HPC_ACCOUNT " ") +if (NOT DEFINED HPC_ACCOUNT) + message(WARNING "HPC_ACCOUNT must be set. CTests will not be created.") + return() +endif() + +# Set ICSDIR_ROOT +set_from_env_or_default(ICSDIR_ROOT ICSDIR_ROOT "") +if (NOT DEFINED ICSDIR_ROOT) + message(WARNING "ICSDIR_ROOT must be set. CTests will not be created.") + return() +endif() + +# Set STAGED_TESTS_DIR +set_from_env_or_default(STAGED_TESTS_DIR STAGED_TESTS_DIR "") +if (NOT DEFINED STAGED_TESTS_DIR) + message(WARNING "STAGED_TESTS_DIR must be set. CTests will not be created.") + return() +endif() + +message(STATUS "gw: global-workflow baselines will be used from: '${HOMEgfs}'") +message(STATUS "gw: global-workflow tests will be run at: '${RUNTESTS}'") +message(STATUS "gw: global-workflow tests will use the allocation: '${HPC_ACCOUNT}'") +message(STATUS "gw: global-workflow tests will use ICSDIR_ROOT: '${ICSDIR_ROOT}'") +message(STATUS "gw: global-workflow tests will use staged data from: '${STAGED_TESTS_DIR}'") + +# Prepare test scripts +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/scripts/setup.sh.in + ${CMAKE_CURRENT_BINARY_DIR}/scripts/setup.sh @ONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/scripts/stage.sh.in + ${CMAKE_CURRENT_BINARY_DIR}/scripts/stage.sh @ONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/scripts/execute.sh.in + ${CMAKE_CURRENT_BINARY_DIR}/scripts/execute.sh @ONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/scripts/validate.sh.in + ${CMAKE_CURRENT_BINARY_DIR}/scripts/validate.sh @ONLY) + +function(AddJJOBTest) + + set(prefix ARG) + set(novals NOTRAPFPE NOVALGRIND) + set(singlevals CASE JOB TEST_DATE) + set(multivals TEST_DEPENDS) + + cmake_parse_arguments(${prefix} + "${novals}" "${singlevals}" "${multivals}" + ${ARGN}) + + set(TEST_NAME ${ARG_CASE}_${ARG_JOB}) + set(CASE_PATH ${HOMEgfs}/ci/cases/pr) + set(CASE_YAML ${CASE_PATH}/${ARG_CASE}.yaml) + + add_test(NAME test_${TEST_NAME}_setup + COMMAND ./setup.sh ${TEST_NAME} ${CASE_YAML} ${ARG_TEST_DATE} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/scripts) + set_tests_properties(test_${TEST_NAME}_setup PROPERTIES LABELS "${ARG_CASE};${ARG_JOB}") + + add_test(NAME test_${TEST_NAME}_stage + COMMAND ./stage.sh ${TEST_NAME} ${ARG_TEST_DATE} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/scripts) + set_tests_properties(test_${TEST_NAME}_stage PROPERTIES DEPENDS test_${TEST_NAME}_setup LABELS "${ARG_CASE};${ARG_JOB}") + + add_test(NAME test_${TEST_NAME}_execute + COMMAND ./execute.sh ${TEST_NAME} ${ARG_JOB} ${ARG_TEST_DATE} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/scripts) + set_tests_properties(test_${TEST_NAME}_execute 
PROPERTIES DEPENDS test_${TEST_NAME}_stage LABELS "${ARG_CASE};${ARG_JOB}") + + # TODO - This is a stub for the validation step + add_test(NAME test_${TEST_NAME}_validate + COMMAND ./validate.sh ${TEST_NAME} ${CASE_YAML} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/scripts) + set_tests_properties(test_${TEST_NAME}_validate PROPERTIES DEPENDS test_${TEST_NAME}_execute LABELS "${ARG_CASE};${ARG_JOB}") +endfunction() + +AddJJOBTest( + CASE "C48_ATM" + JOB "gfs_fcst_seg0" + TEST_DATE "2021032312" +) diff --git a/ctests/README.md b/ctests/README.md new file mode 100644 index 0000000000..95a32cd952 --- /dev/null +++ b/ctests/README.md @@ -0,0 +1,58 @@ +# CTest Framework for NOAA Global Workflow + +This directory contains the CTest framework for testing Rocoto JJOBS. The framework allows you to stage, execute, and validate individual JJOBS independently from other jobs in the workflow. Each test requires its own YAML definition of inputs and configurations. + +## Overview + +The CTest framework consists of the following scripts: +- **setup.sh.in**: Prepares the environment and creates the experiment. +- **stage.sh.in**: Stages the input files needed to run a JJOB. +- **execute.sh.in**: Executes the JJOB and monitors its status. +- **validate.sh.in**: (TODO) Validates the results of the JJOB. + +## Usage + +### CMake Configuration + +To configure the CTest framework using CMake, you need to provide several environment variables or default values. Here is an example of how to configure and build the project: + +```bash +# Set environment variables (may also be included on the command line with -D) +export HPC_ACCOUNT="your_hpc_account" +export ICSDIR_ROOT="/path/to/icsdir_root" +export STAGED_TESTS_DIR="/path/to/staged_tests_dir" + +# Run CMake to configure the ctest framework +cmake -S /path/to/HOMEgfs -B /path/to/build -DRUNTESTS=/path/to/runtests + +``` + +### Running Tests with CTest + +Once the project is configured, you can run the tests using CTest. Here are some examples: + +#### Run All Tests + +```bash +cd /path/to/build +ctest +``` + +#### Run Tests for a Specific Case + +You can use the `-L` option with CTest to run tests for a specific case.
For example, to run tests for the `C48_ATM` case: + +```bash +cd /path/to/build +ctest -L C48_ATM +``` + +To add a new test, use the **AddJJOBTest()** function at the end of the `$HOMEgfs/ctests/CMakeLists.txt` file as follows: +```cmake +AddJJOBTest( + CASE "C48_ATM" + JOB "gfs_fcst_seg0" + TEST_DATE "2021032312" +) +``` +Then create a new YAML file with the required staged input files as is done with this example found in `$HOMEgfs/ctests/cases/C48_ATM_gfs_fcst_seg0.yaml` diff --git a/ctests/cases/C48_ATM_gfs_fcst_seg0.yaml b/ctests/cases/C48_ATM_gfs_fcst_seg0.yaml new file mode 100644 index 0000000000..ec0ce88ff1 --- /dev/null +++ b/ctests/cases/C48_ATM_gfs_fcst_seg0.yaml @@ -0,0 +1,17 @@ +input_files: + mkdir: + - "{{ 'RUNTESTS' | getenv }}/COMROOT/{{ 'TEST_NAME' | getenv }}/gfs.{{TEST_DATE | to_YMD}}/{{TEST_DATE | strftime('%H')}}/model/atmos/input" + copy: + - ["{{ 'STAGED_TESTS_DIR' | getenv }}/{{ 'TEST_NAME' | getenv }}/input_files/gfs.{{ TEST_DATE | to_YMD }}/{{ TEST_DATE | strftime('%H') }}/model/atmos/input/gfs_ctrl.nc", "{{ 'RUNTESTS' | getenv }}/COMROOT/{{ 'TEST_NAME' | getenv }}/gfs.{{TEST_DATE | to_YMD}}/{{TEST_DATE | strftime('%H')}}/model/atmos/input/gfs_ctrl.nc"] + - ["{{ 'STAGED_TESTS_DIR' | getenv }}/{{ 'TEST_NAME' | getenv }}/input_files/gfs.{{ TEST_DATE | to_YMD }}/{{ TEST_DATE | strftime('%H') }}/model/atmos/input/gfs_data.tile1.nc", "{{ 'RUNTESTS' | getenv }}/COMROOT/{{ 'TEST_NAME' | getenv }}/gfs.{{TEST_DATE | to_YMD}}/{{TEST_DATE | strftime('%H')}}/model/atmos/input/gfs_data.tile1.nc"] + - ["{{ 'STAGED_TESTS_DIR' | getenv }}/{{ 'TEST_NAME' | getenv }}/input_files/gfs.{{ TEST_DATE | to_YMD }}/{{ TEST_DATE | strftime('%H') }}/model/atmos/input/gfs_data.tile2.nc", "{{ 'RUNTESTS' | getenv }}/COMROOT/{{ 'TEST_NAME' | getenv }}/gfs.{{TEST_DATE | to_YMD}}/{{TEST_DATE | strftime('%H')}}/model/atmos/input/gfs_data.tile2.nc"] + - ["{{ 'STAGED_TESTS_DIR' | getenv }}/{{ 'TEST_NAME' | getenv }}/input_files/gfs.{{ TEST_DATE | to_YMD }}/{{ TEST_DATE | strftime('%H') }}/model/atmos/input/gfs_data.tile3.nc", "{{ 'RUNTESTS' | getenv }}/COMROOT/{{ 'TEST_NAME' | getenv }}/gfs.{{TEST_DATE | to_YMD}}/{{TEST_DATE | strftime('%H')}}/model/atmos/input/gfs_data.tile3.nc"] + - ["{{ 'STAGED_TESTS_DIR' | getenv }}/{{ 'TEST_NAME' | getenv }}/input_files/gfs.{{ TEST_DATE | to_YMD }}/{{ TEST_DATE | strftime('%H') }}/model/atmos/input/gfs_data.tile4.nc", "{{ 'RUNTESTS' | getenv }}/COMROOT/{{ 'TEST_NAME' | getenv }}/gfs.{{TEST_DATE | to_YMD}}/{{TEST_DATE | strftime('%H')}}/model/atmos/input/gfs_data.tile4.nc"] + - ["{{ 'STAGED_TESTS_DIR' | getenv }}/{{ 'TEST_NAME' | getenv }}/input_files/gfs.{{ TEST_DATE | to_YMD }}/{{ TEST_DATE | strftime('%H') }}/model/atmos/input/gfs_data.tile5.nc", "{{ 'RUNTESTS' | getenv }}/COMROOT/{{ 'TEST_NAME' | getenv }}/gfs.{{TEST_DATE | to_YMD}}/{{TEST_DATE | strftime('%H')}}/model/atmos/input/gfs_data.tile5.nc"] + - ["{{ 'STAGED_TESTS_DIR' | getenv }}/{{ 'TEST_NAME' | getenv }}/input_files/gfs.{{ TEST_DATE | to_YMD }}/{{ TEST_DATE | strftime('%H') }}/model/atmos/input/gfs_data.tile6.nc", "{{ 'RUNTESTS' | getenv }}/COMROOT/{{ 'TEST_NAME' | getenv }}/gfs.{{TEST_DATE | to_YMD}}/{{TEST_DATE | strftime('%H')}}/model/atmos/input/gfs_data.tile6.nc"] + - ["{{ 'STAGED_TESTS_DIR' | getenv }}/{{ 'TEST_NAME' | getenv }}/input_files/gfs.{{ TEST_DATE | to_YMD }}/{{ TEST_DATE | strftime('%H') }}/model/atmos/input/sfc_data.tile1.nc", "{{ 'RUNTESTS' | getenv }}/COMROOT/{{ 'TEST_NAME' | getenv }}/gfs.{{TEST_DATE | to_YMD}}/{{TEST_DATE | 
strftime('%H')}}/model/atmos/input/sfc_data.tile1.nc"] + - ["{{ 'STAGED_TESTS_DIR' | getenv }}/{{ 'TEST_NAME' | getenv }}/input_files/gfs.{{ TEST_DATE | to_YMD }}/{{ TEST_DATE | strftime('%H') }}/model/atmos/input/sfc_data.tile2.nc", "{{ 'RUNTESTS' | getenv }}/COMROOT/{{ 'TEST_NAME' | getenv }}/gfs.{{TEST_DATE | to_YMD}}/{{TEST_DATE | strftime('%H')}}/model/atmos/input/sfc_data.tile2.nc"] + - ["{{ 'STAGED_TESTS_DIR' | getenv }}/{{ 'TEST_NAME' | getenv }}/input_files/gfs.{{ TEST_DATE | to_YMD }}/{{ TEST_DATE | strftime('%H') }}/model/atmos/input/sfc_data.tile3.nc", "{{ 'RUNTESTS' | getenv }}/COMROOT/{{ 'TEST_NAME' | getenv }}/gfs.{{TEST_DATE | to_YMD}}/{{TEST_DATE | strftime('%H')}}/model/atmos/input/sfc_data.tile3.nc"] + - ["{{ 'STAGED_TESTS_DIR' | getenv }}/{{ 'TEST_NAME' | getenv }}/input_files/gfs.{{ TEST_DATE | to_YMD }}/{{ TEST_DATE | strftime('%H') }}/model/atmos/input/sfc_data.tile4.nc", "{{ 'RUNTESTS' | getenv }}/COMROOT/{{ 'TEST_NAME' | getenv }}/gfs.{{TEST_DATE | to_YMD}}/{{TEST_DATE | strftime('%H')}}/model/atmos/input/sfc_data.tile4.nc"] + - ["{{ 'STAGED_TESTS_DIR' | getenv }}/{{ 'TEST_NAME' | getenv }}/input_files/gfs.{{ TEST_DATE | to_YMD }}/{{ TEST_DATE | strftime('%H') }}/model/atmos/input/sfc_data.tile5.nc", "{{ 'RUNTESTS' | getenv }}/COMROOT/{{ 'TEST_NAME' | getenv }}/gfs.{{TEST_DATE | to_YMD}}/{{TEST_DATE | strftime('%H')}}/model/atmos/input/sfc_data.tile5.nc"] + - ["{{ 'STAGED_TESTS_DIR' | getenv }}/{{ 'TEST_NAME' | getenv }}/input_files/gfs.{{ TEST_DATE | to_YMD }}/{{ TEST_DATE | strftime('%H') }}/model/atmos/input/sfc_data.tile6.nc", "{{ 'RUNTESTS' | getenv }}/COMROOT/{{ 'TEST_NAME' | getenv }}/gfs.{{TEST_DATE | to_YMD}}/{{TEST_DATE | strftime('%H')}}/model/atmos/input/sfc_data.tile6.nc"] diff --git a/ctests/scripts/execute.sh.in b/ctests/scripts/execute.sh.in new file mode 100755 index 0000000000..9cf3ef5917 --- /dev/null +++ b/ctests/scripts/execute.sh.in @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +set -xe + +TEST_NAME=${1:?"Name of the test is required"} +JOB=${2:?"Job name is required"} +# TODO - adding idate by hand for now, need to get this from the test somehow +idate=$3 + +#TODO - add rocotoboot_dryrun to repo some how +rocotoboot_dryrun=/work2/noaa/global/mterry/rocoto_dryrun/bin/rocotoboot +CASEDIR="@CMAKE_CURRENT_BINARY_DIR@/RUNTESTS/EXPDIR/${TEST_NAME}" +cd "${CASEDIR}" +rm -f ./*.db +rm -f ./jobcard + +yes | "${rocotoboot_dryrun}" -d "${TEST_NAME}.db" -w "${TEST_NAME}.xml" -v 10 -c "${idate}00" -t "${JOB}" 2> jobcard || true +sed '/^{{\|^}}/d' < jobcard | sed '1d' > "${TEST_NAME}.sub" || true + +#TODO - Generalize for batch system (hard coded to slurm) + +output=$(sbatch "${TEST_NAME}.sub") +job_id=$(echo "${output}" | awk '{print $4}') +echo "Job ${job_id} submitted for test ${TEST_NAME} with job name ${JOB}" + +# First loop: wait until job appears +lack_of_job_count=0 +LACK_OF_JOB_LIMIT=5 + +while true; do + job_status=$(sacct -j "${job_id}" --format=State --noheader -n | head -1) || true + if [[ -n "${job_status}" ]]; then + echo "Job ${job_id} found in sacct." + break + fi + echo "Job ${job_id} not in sacct yet, attempt ${lack_of_job_count}/${LACK_OF_JOB_LIMIT}." + lack_of_job_count=$((lack_of_job_count + 1)) + if [[ "${lack_of_job_count}" -ge "${LACK_OF_JOB_LIMIT}" ]]; then + echo "Job ${job_id} not found after ${lack_of_job_count} attempts. Exiting." 
+ exit 1 + fi + sleep 30 +done + +# Second loop: monitor job status until completion or failure +timeout=0 +TIMEOUT=60 +while true; do + # Trim trailing spaces from job_status + job_status=$(sacct -j "${job_id}" --format=State --noheader -n | head -1 | xargs) || true + if [[ "${job_status}" == "COMPLETED" ]]; then + echo "Job ${job_id} completed successfully." + break + elif [[ "${job_status}" =~ ^(FAILED|CANCELLED|TIMEOUT)$ ]]; then + echo "Job ${job_id} failed with status: ${job_status}." + exit 1 + else + echo "Job ${job_id} is still running with status: ${job_status}." + sleep 60 + timeout=$((timeout + 1)) + if [[ "${timeout}" -gt "${TIMEOUT}" ]]; then + echo "Job ${job_id} has been running for more than ${TIMEOUT} minutes. Exiting." + exit 1 + fi + fi +done diff --git a/ctests/scripts/setup.sh.in b/ctests/scripts/setup.sh.in new file mode 100755 index 0000000000..6c4a772b65 --- /dev/null +++ b/ctests/scripts/setup.sh.in @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +set -ux + +TEST_NAME=${1:?"Name of the test is required"} +YAML_FILE=${2:?"Name of the CI yaml file for the test"} + +# CMake to fill these variables +HOMEgfs="@PROJECT_SOURCE_DIR@" +RUNTESTS="@RUNTESTS@" +ICSDIR_ROOT="@ICSDIR_ROOT@" +HPC_ACCOUNT="@HPC_ACCOUNT@" + +set +x +source "${HOMEgfs}/workflow/gw_setup.sh" +set -x + +pslot="${TEST_NAME}" \ +RUNTESTS="${RUNTESTS}" \ +ICSDIR_ROOT="${ICSDIR_ROOT}" \ +HPC_ACCOUNT="${HPC_ACCOUNT}" \ +"${HOMEgfs}/workflow/create_experiment.py" --yaml "${YAML_FILE}" --overwrite +rc=$? +if [[ "${rc}" -ne 0 ]]; then + set +x + echo "Failed to create test experiment for '${TEST_NAME}' with yaml file '${YAML_FILE}'" + set -x + exit "${rc}" +fi + +exit 0 diff --git a/ctests/scripts/stage.py b/ctests/scripts/stage.py new file mode 100755 index 0000000000..b8a77a120d --- /dev/null +++ b/ctests/scripts/stage.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 + +""" +stage.py + +This script is part of the ctest framework for testing Rocoto JJOBS that stages the +input files needed to run a JJOB independently from other jobs in the workflow. +The YAML file specified at the command line contains the paths to the staged input files +and their corresponding directories under the COMROOT of the experiment for the JJOB. + +Usage: + stage.py -y <yaml_file> [-d <test_date>] + +Arguments: + -y, --yaml Path to the YAML file describing the job test configuration (required) + -d, --test_date Test date in YYYYMMDDHH format (optional) + +Example: + ./stage.py -y /path/to/config.yaml -d 2021032312 +""" + +import os +import datetime + +from argparse import ArgumentParser +from pathlib import Path +from wxflow import parse_j2yaml, FileHandler, Logger + +# Initialize logger with environment variable for logging level +logger = Logger(level=os.environ.get("LOGGING_LEVEL", "DEBUG"), colored_log=False) + + +def parse_args(): + """ + Parse command line arguments. + + Returns + ------- + argparse.Namespace + The parsed command line arguments, including: + - yaml: Path to the YAML file describing the job test configuration. + - test_date: Optional test date in YYYYMMDDHH format.
+ """ + description = """Stage the input files needed to run a JJOB ctest, as described in its YAML configuration + """ + parser = ArgumentParser(description=description) + + # Add argument for YAML file path + parser.add_argument('-y', '--yaml', help='full path to yaml file describing the job test configuration', type=Path, required=True) + # Add optional argument for test date + parser.add_argument('-d', '--test_date', help='test date in YYYYMMDDHH format', type=str, required=False) + return parser.parse_args() + + +if __name__ == '__main__': + + # Parse command line arguments + args = parse_args() + + data = {} + if args.test_date: + # Parse test date from string to datetime object + data['TEST_DATE'] = datetime.datetime.strptime(args.test_date, '%Y%m%d%H') + # Parse YAML configuration file with optional data + case_cfg = parse_j2yaml(path=args.yaml, data=data) + # Synchronize input files as per the parsed configuration + FileHandler(case_cfg.input_files).sync() diff --git a/ctests/scripts/stage.sh.in b/ctests/scripts/stage.sh.in new file mode 100755 index 0000000000..9ced3d8f4e --- /dev/null +++ b/ctests/scripts/stage.sh.in @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +set -ux + +TEST_NAME=${1:?"Name of the test is required"} +TEST_DATE=${2:?"idate of the test is required"} + +# CMake to fill these variables +STAGED_TESTS_DIR="@STAGED_TESTS_DIR@" +RUNTESTS="@RUNTESTS@" +HOMEgfs="@PROJECT_SOURCE_DIR@" + +# Load the runtime environment for this script (needs wxflow and its dependencies) +set +x +source "${HOMEgfs}/workflow/gw_setup.sh" +rc=$? +[[ "${rc}" -ne 0 ]] && exit "${rc}" +set -x +PYTHONPATH="${PYTHONPATH:+${PYTHONPATH}:}${HOMEgfs}/sorc/wxflow/src" +export PYTHONPATH + +INPUTS_YAML="${HOMEgfs}/ctests/cases/${TEST_NAME}.yaml" + +TEST_NAME="${TEST_NAME}" \ +RUNTESTS="${RUNTESTS}" \ +STAGED_TESTS_DIR="${STAGED_TESTS_DIR}" \ +"${HOMEgfs}/ctests/scripts/stage.py" --yaml "${INPUTS_YAML}" --test_date "${TEST_DATE}" +rc=$?
+if [[ "${rc}" -ne 0 ]]; then + set +x + echo "Failed to stage inputs for '${TEST_NAME}' with '${INPUTS_YAML}'" + set -x + exit "${rc}" +fi + +exit 0 diff --git a/ctests/scripts/validate.sh.in b/ctests/scripts/validate.sh.in new file mode 100755 index 0000000000..0277699956 --- /dev/null +++ b/ctests/scripts/validate.sh.in @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +set -ux + +TEST_NAME=${1:?"Name of the test is required"} +YAML_FILE=${2:?"Name of the CI yaml file for validating the test"} + +echo "validating '${TEST_NAME}' with yaml file '${YAML_FILE}'" + +exit 0 \ No newline at end of file diff --git a/modulefiles/module_gwsetup.hercules.lua b/modulefiles/module_gwsetup.hercules.lua index 9d845fb71d..e7735e4aa1 100644 --- a/modulefiles/module_gwsetup.hercules.lua +++ b/modulefiles/module_gwsetup.hercules.lua @@ -9,6 +9,7 @@ prepend_path("MODULEPATH", "/work/noaa/epic/role-epic/spack-stack/hercules/spack local stack_intel_ver=os.getenv("stack_intel_ver") or "2021.9.0" local python_ver=os.getenv("python_ver") or "3.11.6" +local cmake_ver=os.getenv("cmake_ver") or "3.23.1" load(pathJoin("stack-intel", stack_intel_ver)) load(pathJoin("python", python_ver)) diff --git a/modulefiles/module_gwsetup.orion.lua b/modulefiles/module_gwsetup.orion.lua index b8e2fc8a9f..5ffebc31a1 100644 --- a/modulefiles/module_gwsetup.orion.lua +++ b/modulefiles/module_gwsetup.orion.lua @@ -10,11 +10,13 @@ prepend_path("MODULEPATH", "/work/noaa/epic/role-epic/spack-stack/orion/spack-st local stack_intel_ver=os.getenv("stack_intel_ver") or "2021.9.0" local python_ver=os.getenv("python_ver") or "3.11.6" +local cmake_ver=os.getenv("cmake_ver") or "3.23.1" load(pathJoin("stack-intel", stack_intel_ver)) load(pathJoin("python", python_ver)) load("py-jinja2") load("py-pyyaml") load("py-numpy") +load(pathJoin("cmake", cmake_ver)) whatis("Description: GFS run setup environment") From a72edf45dab7db23b9c2d99175100a019183e8b9 Mon Sep 17 00:00:00 2001 From: AntonMFernando-NOAA <167725623+AntonMFernando-NOAA@users.noreply.github.com> Date: Tue, 21 Jan 2025 12:40:00 -0500 Subject: [PATCH 32/33] Add domain level to wave gridded COM path (#3137) This pull request addresses the organization of wave gridded products within the file structure. Currently, all wave gridded products are placed in products/wave/gridded, without any further categorization. To align with the structure used by other component products (such as grib2), this update introduces a domain/resolution directory level to group output more effectively. For example, instead of having all products in a single directory, the structure will now include specific directories based on domain and resolution (e.g., products/wave/gridded/arctic.9km, products/wave/gridded/global.0p16). This change improves organization and scalability for wave gridded product outputs. 
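As a rough sketch of the resulting layout (an illustration only, not code from this PR; the `wave_grid_dir` helper name is hypothetical), a few grid IDs from the `process_grdID` case table added in `ush/wave_domain_grid.sh` map to COM subdirectories like so:

```python
# Illustrative mapping only: entries mirror the process_grdID case table
# added in ush/wave_domain_grid.sh by this PR.
WAVE_GRIDS = {
    "aoc_9km": ("arctic", "9km"),
    "glo_10m": ("global", "0p16"),
    "at_10m": ("atlocn", "0p16"),
    "glo_30m": ("global", "0p50"),
}


def wave_grid_dir(grd_id: str) -> str:
    """Return the products subdirectory for one wave grid ID."""
    region, res = WAVE_GRIDS[grd_id]
    return f"products/wave/gridded/{region}.{res}"


# wave_grid_dir("aoc_9km") -> "products/wave/gridded/arctic.9km"
# wave_grid_dir("glo_10m") -> "products/wave/gridded/global.0p16"
```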
Resolves #2677 --- jobs/JGLOBAL_ARCHIVE | 23 ++++- jobs/JGLOBAL_EXTRACTVARS | 5 ++ jobs/JGLOBAL_WAVE_GEMPAK | 10 +++ jobs/JGLOBAL_WAVE_POST_SBS | 14 ++- jobs/JGLOBAL_WAVE_PRDGEN_GRIDDED | 12 ++- parm/archive/gfswave.yaml.j2 | 20 ++++- parm/config/gfs/config.com | 1 + parm/config/gfs/config.waveawipsgridded | 2 + parm/config/gfs/config.wavegempak | 2 + scripts/exgfs_wave_nawips.sh | 10 ++- scripts/exgfs_wave_post_gridded_sbs.sh | 110 ++++++++---------------- scripts/exgfs_wave_prdgen_gridded.sh | 31 ++----- scripts/exglobal_archive.py | 2 +- ush/wave_domain_grid.sh | 43 +++++++++ ush/wave_extractvars.sh | 7 +- ush/wave_grib2_sbs.sh | 39 ++++++--- 16 files changed, 206 insertions(+), 125 deletions(-) create mode 100644 ush/wave_domain_grid.sh diff --git a/jobs/JGLOBAL_ARCHIVE b/jobs/JGLOBAL_ARCHIVE index 401feba35f..f62386cdd9 100755 --- a/jobs/JGLOBAL_ARCHIVE +++ b/jobs/JGLOBAL_ARCHIVE @@ -1,9 +1,8 @@ #! /usr/bin/env bash source "${HOMEgfs}/ush/preamble.sh" -source "${HOMEgfs}/ush/jjob_header.sh" -e "arch" -c "base arch" - - +source "${HOMEgfs}/ush/jjob_header.sh" -e "arch" -c "base arch wave" +source "${USHgfs}/wave_domain_grid.sh" ############################################## # Set variables used in the script ############################################## @@ -50,6 +49,23 @@ for grid in "0p25" "0p50" "1p00"; do "COMIN_ATMOS_GRIB_${grid}:COM_ATMOS_GRIB_GRID_TMPL" done +############################################################### +# Create an array of output wave grids to process +if [[ "${DO_WAVE}" == "YES" ]]; then + # Create a variable for output wave grids to process + if [[ -n "${wavepostGRD}" || -n "${waveinterpGRD}" ]]; then + for grdID in ${wavepostGRD} ${waveinterpGRD}; do + process_grdID "${grdID}" + YMD=${PDY} HH=${cyc} GRDRESNAME=${grdNAME} declare_from_tmpl -rx \ + "COMIN_WAVE_GRID_${GRDREGION}_${GRDRES}:COM_WAVE_GRID_RES_TMPL" + grids_arr+=("${GRDREGION}.${GRDRES}") + done + export WAVE_OUT_GRIDS="${grids_arr[*]}" + else + echo "Both wavepostGRD and waveinterpGRD are empty. No grids to process." + export WAVE_OUT_GRIDS="" + fi +fi ############################################################### # Run archive script ############################################################### @@ -69,7 +85,6 @@ if [[ -e "${pgmout}" ]] ; then cat "${pgmout}" fi - ########################################## # Remove the Temporary working directory ########################################## diff --git a/jobs/JGLOBAL_EXTRACTVARS b/jobs/JGLOBAL_EXTRACTVARS index 3478ca3976..1c1a763a03 100755 --- a/jobs/JGLOBAL_EXTRACTVARS +++ b/jobs/JGLOBAL_EXTRACTVARS @@ -2,6 +2,7 @@ source "${HOMEgfs}/ush/preamble.sh" source "${HOMEgfs}/ush/jjob_header.sh" -e "extractvars" -c "base extractvars" +source "${USHgfs}/wave_domain_grid.sh" # Set COM Paths for grid in '0p25' '0p50' '1p00'; do @@ -19,6 +20,10 @@ YMD="${PDY}" HH="${cyc}" declare_from_tmpl -rx \ "COMIN_ICE_NETCDF:COM_ICE_NETCDF_TMPL" \ "COMIN_WAVE_GRID:COM_WAVE_GRID_TMPL" +process_grdID "${waveGRD}" +YMD=${PDY} HH=${cyc} GRDRESNAME=${grdNAME} declare_from_tmpl -rx \ + "COMIN_WAVE_GRID_${GRDREGION}_${GRDRES}:COM_WAVE_GRID_RES_TMPL" + if [[ "${DO_ATM}" == "YES" ]]; then if [[ ! -d "${ARC_RFCST_PROD_ATMOS_F2D}" ]]; then mkdir -p "${ARC_RFCST_PROD_ATMOS_F2D}"; fi if [[ ! 
-d "${ARC_RFCST_PROD_ATMOS_F3D}" ]]; then mkdir -p "${ARC_RFCST_PROD_ATMOS_F3D}"; fi diff --git a/jobs/JGLOBAL_WAVE_GEMPAK b/jobs/JGLOBAL_WAVE_GEMPAK index c7b615c560..1c01b0cf64 100755 --- a/jobs/JGLOBAL_WAVE_GEMPAK +++ b/jobs/JGLOBAL_WAVE_GEMPAK @@ -17,6 +17,16 @@ YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \ COMIN_WAVE_GRID:COM_WAVE_GRID_TMPL \ COMOUT_WAVE_GEMPAK:COM_WAVE_GEMPAK_TMPL +if [[ -n "${GEMPAK_GRIDS}" ]]; then + for grdID in ${GEMPAK_GRIDS}; do + process_grdID "${grdID}" + YMD=${PDY} HH=${cyc} GRDRESNAME=${grdNAME} declare_from_tmpl -rx \ + "COMIN_WAVE_GRID_${GRDREGION}_${GRDRES}:COM_WAVE_GRID_RES_TMPL" + done +else + echo "waveinterpGRD is empty. No grids to process." +fi + if [[ ! -d ${COMOUT_WAVE_GEMPAK} ]]; then mkdir -p "${COMOUT_WAVE_GEMPAK}"; fi ######################################################## diff --git a/jobs/JGLOBAL_WAVE_POST_SBS b/jobs/JGLOBAL_WAVE_POST_SBS index 53ac4b2083..171d160515 100755 --- a/jobs/JGLOBAL_WAVE_POST_SBS +++ b/jobs/JGLOBAL_WAVE_POST_SBS @@ -2,6 +2,7 @@ source "${HOMEgfs}/ush/preamble.sh" source "${HOMEgfs}/ush/jjob_header.sh" -e "wavepostsbs" -c "base wave wavepostsbs" +source "${USHgfs}/wave_domain_grid.sh" # Add default errchk = err_chk export errchk=${errchk:-err_chk} @@ -19,7 +20,18 @@ for out_dir in "${COMOUT_WAVE_PREP}" "${COMOUT_WAVE_GRID}"; do if [[ ! -d "${out_dir}" ]]; then mkdir -p "${out_dir}"; fi done - +if [[ -n "${wavepostGRD}" || -n "${waveinterpGRD}" ]]; then + for grdID in ${wavepostGRD} ${waveinterpGRD}; do + process_grdID "${grdID}" + YMD=${PDY} HH=${cyc} GRDRESNAME=${grdNAME} declare_from_tmpl -rx \ + "COMOUT_WAVE_GRID_${GRDREGION}_${GRDRES}:COM_WAVE_GRID_RES_TMPL" + out_dir_varname="COMOUT_WAVE_GRID_${GRDREGION}_${GRDRES}" + out_dir=${!out_dir_varname} + if [[ ! -d "${out_dir}" ]]; then mkdir -p "${out_dir}"; fi + done +else + echo "Both wavepostGRD and waveinterpGRD are empty. No grids to process." +fi # Set wave model ID tag to include member number # if ensemble; waveMEMB var empty in deterministic # Set wave model ID tag to include member number diff --git a/jobs/JGLOBAL_WAVE_PRDGEN_GRIDDED b/jobs/JGLOBAL_WAVE_PRDGEN_GRIDDED index 208b36c535..f84c5abc01 100755 --- a/jobs/JGLOBAL_WAVE_PRDGEN_GRIDDED +++ b/jobs/JGLOBAL_WAVE_PRDGEN_GRIDDED @@ -2,6 +2,7 @@ source "${HOMEgfs}/ush/preamble.sh" source "${HOMEgfs}/ush/jjob_header.sh" -e "waveawipsgridded" -c "base wave waveawipsgridded" +source "${USHgfs}/wave_domain_grid.sh" # Add default errchk = err_chk export errchk=${errchk:-err_chk} @@ -19,6 +20,15 @@ YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \ if [[ ! -d ${COMOUT_WAVE_WMO} ]]; then mkdir -p "${COMOUT_WAVE_WMO}"; fi +if [[ -n "${GEMPAK_GRIDS}" ]]; then + for grdID in ${GEMPAK_GRIDS}; do + process_grdID "${grdID}" + YMD=${PDY} HH=${cyc} GRDRESNAME=${grdNAME} declare_from_tmpl -rx \ + "COMIN_WAVE_GRID_${GRDREGION}_${GRDRES}:COM_WAVE_GRID_RES_TMPL" + done +else + echo "GEMPAK_GRIDS are empty. No grids to process." +fi ################################### # Execute the Script @@ -35,6 +45,4 @@ if [ "${KEEPDATA}" != "YES" ]; then rm -rf ${DATA} fi - exit 0 - diff --git a/parm/archive/gfswave.yaml.j2 b/parm/archive/gfswave.yaml.j2 index 6909421757..550ecdb1be 100644 --- a/parm/archive/gfswave.yaml.j2 +++ b/parm/archive/gfswave.yaml.j2 @@ -1,20 +1,31 @@ gfswave: {% set head = "gfswave.t" + cycle_HH + "z." 
%} + name: "GFSWAVE" target: "{{ ATARDIR }}/{{ cycle_YMDH }}/gfswave.tar" required: # Wave GRIB2 regional forecast products + {% set WAVE_OUT_GRIDS_list = WAVE_OUT_GRIDS.split(' ') %} + {% for grd in WAVE_OUT_GRIDS_list %} + {% set tmpl_dict = ({ '${ROTDIR}':ROTDIR, + '${RUN}':RUN, + '${YMD}':cycle_YMD, + '${HH}':cycle_HH, + '${MEMDIR}':'', + '${GRDRESNAME}':grd}) %} + {% set file_path = COM_WAVE_GRID_RES_TMPL | replace_tmpl(tmpl_dict) %} + {% for fh in range(0, FHMAX_HF_WAV + FHOUT_HF_WAV, FHOUT_HF_WAV) %} # NOTE This is as explicit as possible without major logic to parse wavepostGRD. # Matches files of the form "gfswave.tCCz...fHHH.grib2". - - "{{ COMIN_WAVE_GRID | relpath(ROTDIR) }}/{{ head }}*.*.f{{ '%03d' % fh }}.grib2" - - "{{ COMIN_WAVE_GRID | relpath(ROTDIR) }}/{{ head }}*.*.f{{ '%03d' % fh }}.grib2.idx" + - "{{ file_path | relpath(ROTDIR) }}/{{ head }}*.*.f{{ '%03d' % fh }}.grib2" + - "{{ file_path | relpath(ROTDIR) }}/{{ head }}*.*.f{{ '%03d' % fh }}.grib2.idx" {% endfor %} # Global wave GRIB2 forecast products {% for fh in range(FHMAX_HF_WAV + FHOUT_WAV, FHMAX_WAV_GFS + FHOUT_WAV, FHOUT_WAV) %} - - "{{ COMIN_WAVE_GRID | relpath(ROTDIR) }}/{{ head }}*.*.f{{ '%03d' % fh }}.grib2" - - "{{ COMIN_WAVE_GRID | relpath(ROTDIR) }}/{{ head }}*.*.f{{ '%03d' % fh }}.grib2.idx" + - "{{ file_path | relpath(ROTDIR) }}/{{ head }}*.*.f{{ '%03d' % fh }}.grib2" + - "{{ file_path | relpath(ROTDIR) }}/{{ head }}*.*.f{{ '%03d' % fh }}.grib2.idx" {% endfor %} # Wave bulletins @@ -28,3 +39,4 @@ gfswave: - "{{ COMIN_WAVE_STATION | relpath(ROTDIR) }}/{{ head }}ibpcbull_tar" - "{{ COMIN_WAVE_STATION | relpath(ROTDIR) }}/{{ head }}ibp_tar" {% endif %} + {% endfor %} diff --git a/parm/config/gfs/config.com b/parm/config/gfs/config.com index d949edb33a..8b6da376f3 100644 --- a/parm/config/gfs/config.com +++ b/parm/config/gfs/config.com @@ -74,6 +74,7 @@ declare -rx COM_WAVE_RESTART_TMPL=${COM_BASE}'/model/wave/restart' declare -rx COM_WAVE_PREP_TMPL=${COM_BASE}'/model/wave/prep' declare -rx COM_WAVE_HISTORY_TMPL=${COM_BASE}'/model/wave/history' declare -rx COM_WAVE_GRID_TMPL=${COM_BASE}'/products/wave/gridded' +declare -rx COM_WAVE_GRID_RES_TMPL=${COM_BASE}'/products/wave/gridded/${GRDRESNAME}' declare -rx COM_WAVE_STATION_TMPL=${COM_BASE}'/products/wave/station' declare -rx COM_WAVE_GEMPAK_TMPL=${COM_BASE}'/products/wave/gempak' declare -rx COM_WAVE_WMO_TMPL=${COM_BASE}'/products/wave/wmo' diff --git a/parm/config/gfs/config.waveawipsgridded b/parm/config/gfs/config.waveawipsgridded index 48cbfda6a7..a752c659af 100644 --- a/parm/config/gfs/config.waveawipsgridded +++ b/parm/config/gfs/config.waveawipsgridded @@ -8,6 +8,8 @@ echo "BEGIN: config.waveawipsgridded" # Get task specific resources . $EXPDIR/config.resources waveawipsgridded +export GEMPAK_GRIDS=${GEMPAK_GRIDS:-ak_10m at_10m ep_10m wc_10m glo_30m} +# export GEMPAK_GRIDS=${GEMPAK_GRIDS:-ao_9km at_10m ep_10m wc_10m glo_30m} # AWIPS output frequency export FHMAX_HF_WAV_WMO=72 export FHMAX_WAV_WMO=180 diff --git a/parm/config/gfs/config.wavegempak b/parm/config/gfs/config.wavegempak index bcbec91f07..c0f53eb1b0 100644 --- a/parm/config/gfs/config.wavegempak +++ b/parm/config/gfs/config.wavegempak @@ -8,5 +8,7 @@ echo "BEGIN: config.wavegempak" # Get task specific resources . 
$EXPDIR/config.resources wavegempak +#export GEMPAK_GRIDS=${GEMPAK_GRIDS:-'glo_30m at_10m ep_10m wc_10m ao_9km'} #Interpolated grids +export GEMPAK_GRIDS=${GEMPAK_GRIDS:-${waveinterpGRD:-'glo_30m'}} echo "END: config.wavegempak" diff --git a/scripts/exgfs_wave_nawips.sh b/scripts/exgfs_wave_nawips.sh index 68ebfc90c7..f8f65a800a 100755 --- a/scripts/exgfs_wave_nawips.sh +++ b/scripts/exgfs_wave_nawips.sh @@ -12,9 +12,10 @@ ##################################################################### source "${USHgfs}/preamble.sh" +source "${USHgfs}/wave_domain_grid.sh" -#export grids=${grids:-'glo_30m at_10m ep_10m wc_10m ao_9km'} #Interpolated grids -export grids=${grids:-${waveinterpGRD:-'glo_30m'}} #Native grids +#export grids=${GEMPAK_GRIDS:-'glo_30m at_10m ep_10m wc_10m ao_9km'} #Interpolated grids +export grids=${GEMPAK_GRIDS:-${waveinterpGRD:-'glo_30m'}} #Native grids export RUNwave=${RUNwave:-${RUN}wave} export fstart=${fstart:-0} export FHMAX_WAV=${FHMAX_WAV:-180} #180 Total of hours to process @@ -73,7 +74,10 @@ while [ ${fhcnt} -le ${FHMAX_WAV} ]; do *) grdIDin= grdIDout= ;; esac - GRIBIN="${COMIN_WAVE_GRID}/${RUNwave}.${cycle}.${grdIDin}.f${fhr}.grib2" + process_grdID "${grid}" + com_varname="COMIN_WAVE_GRID_${GRDREGION}_${GRDRES}" + com_dir=${!com_varname} + GRIBIN="${com_dir}/${RUNwave}.${cycle}.${grdIDin}.f${fhr}.grib2" GRIBIN_chk=${GRIBIN}.idx if ! wait_for_file "${GRIBIN_chk}" "${sleep_interval}" "${maxtries}"; then echo "FATAL ERROR: ${GRIBIN_chk} not found after waiting $((sleep_interval * ( maxtries - 1))) secs" diff --git a/scripts/exgfs_wave_post_gridded_sbs.sh b/scripts/exgfs_wave_post_gridded_sbs.sh index 423d6af694..83584e7a5d 100755 --- a/scripts/exgfs_wave_post_gridded_sbs.sh +++ b/scripts/exgfs_wave_post_gridded_sbs.sh @@ -33,6 +33,7 @@ # 0. Preparations source "${USHgfs}/preamble.sh" +source "${USHgfs}/wave_domain_grid.sh" # 0.a Basic modes of operation @@ -218,7 +219,6 @@ source "${USHgfs}/preamble.sh" echo ' Making command file for sbs grib2 and GRID Interpolation ' set_trace fhr=$(( 10#${FHR3} )) - fhrg=$fhr ymdh=$($NDATE $fhr ${PDY}${cyc}) YMD=$(echo $ymdh | cut -c1-8) HMS="$(echo $ymdh | cut -c9-10)0000" @@ -238,75 +238,41 @@ source "${USHgfs}/preamble.sh" export GRDIDATA=${DATA}/output_$YMDHMS # Gridded data (main part, need to be run side-by-side with forecast + gfile="${COMIN_WAVE_HISTORY}/${WAV_MOD_TAG}.out_grd.${waveGRD}.${YMD}.${HMS}" + if [[ ! -s "${gfile}" ]]; then + echo " FATAL ERROR : NO RAW FIELD OUTPUT FILE ${gfile}" + err=3; export err; "${errchk}" + exit "${err}" + fi + ${NLN} "${gfile}" "./out_grd.${waveGRD}" - if [ $fhr = $fhrg ] + if [ "$DOGRI_WAV" = 'YES' ] then - gfile="${COMIN_WAVE_HISTORY}/${WAV_MOD_TAG}.out_grd.${waveGRD}.${YMD}.${HMS}" - if [[ ! 
-s "${gfile}" ]]; then - echo " FATAL ERROR : NO RAW FIELD OUTPUT FILE ${gfile}" - err=3; export err; "${errchk}" - exit "${err}" - fi - ${NLN} "${gfile}" "./out_grd.${waveGRD}" - - if [ "$DOGRI_WAV" = 'YES' ] - then - nigrd=1 - for grdID in $waveinterpGRD - do - ymdh_int=$($NDATE -${WAVHINDH} $ymdh); dt_int=3600.; n_int=9999 ; - echo "${USHgfs}/wave_grid_interp_sbs.sh $grdID $ymdh_int $dt_int $n_int > grint_$grdID.out 2>&1" >> ${fcmdigrd}.${nigrd} - if [ "$DOGRB_WAV" = 'YES' ] - then - gribFL=\'$(echo ${OUTPARS_WAV})\' - case $grdID in - glo_15mxt) GRDNAME='global' ; GRDRES=0p25 ; GRIDNR=255 ; MODNR=11 ;; - reg025) GRDNAME='global' ; GRDRES=0p25 ; GRIDNR=255 ; MODNR=11 ;; - glo_025) GRDNAME='global' ; GRDRES=0p25 ; GRIDNR=255 ; MODNR=11 ;; - glo_100) GRDNAME='global' ; GRDRES=1p00 ; GRIDNR=255 ; MODNR=11 ;; - glo_200) GRDNAME='global' ; GRDRES=2p00 ; GRIDNR=255 ; MODNR=11 ;; - glo_500) GRDNAME='global' ; GRDRES=5p00 ; GRIDNR=255 ; MODNR=11 ;; - glo_30mxt) GRDNAME='global' ; GRDRES=0p50 ; GRIDNR=255 ; MODNR=11 ;; - glo_30m) GRDNAME='global' ; GRDRES=0p50 ; GRIDNR=255 ; MODNR=11 ;; - at_10m) GRDNAME='atlocn' ; GRDRES=0p16 ; GRIDNR=255 ; MODNR=11 ;; - ep_10m) GRDNAME='epacif' ; GRDRES=0p16 ; GRIDNR=255 ; MODNR=11 ;; - wc_10m) GRDNAME='wcoast' ; GRDRES=0p16 ; GRIDNR=255 ; MODNR=11 ;; - ak_10m) GRDNAME='alaska' ; GRDRES=0p16 ; GRIDNR=255 ; MODNR=11 ;; - esac - echo "${USHgfs}/wave_grib2_sbs.sh $grdID $GRIDNR $MODNR $ymdh $fhr $GRDNAME $GRDRES $gribFL > grib_$grdID.out 2>&1" >> ${fcmdigrd}.${nigrd} - fi - echo "${GRIBDATA}/${fcmdigrd}.${nigrd}" >> ${fcmdnow} - chmod 744 ${fcmdigrd}.${nigrd} - nigrd=$((nigrd+1)) - done - fi - - if [ "$DOGRB_WAV" = 'YES' ] - then - for grdID in ${wavepostGRD} # First concatenate grib files for sbs grids - do + nigrd=1 + for grdID in $waveinterpGRD + do + ymdh_int=$($NDATE -${WAVHINDH} $ymdh); dt_int=3600.; n_int=9999 ; + echo "${USHgfs}/wave_grid_interp_sbs.sh $grdID $ymdh_int $dt_int $n_int > grint_$grdID.out 2>&1" >> ${fcmdigrd}.${nigrd} + if [ "$DOGRB_WAV" = 'YES' ] + then gribFL=\'$(echo ${OUTPARS_WAV})\' - case $grdID in - aoc_9km) GRDNAME='arctic' ; GRDRES=9km ; GRIDNR=255 ; MODNR=11 ;; - ant_9km) GRDNAME='antarc' ; GRDRES=9km ; GRIDNR=255 ; MODNR=11 ;; - glo_10m) GRDNAME='global' ; GRDRES=0p16 ; GRIDNR=255 ; MODNR=11 ;; - gnh_10m) GRDNAME='global' ; GRDRES=0p16 ; GRIDNR=255 ; MODNR=11 ;; - gsh_15m) GRDNAME='gsouth' ; GRDRES=0p25 ; GRIDNR=255 ; MODNR=11 ;; - glo_15m) GRDNAME='global' ; GRDRES=0p25 ; GRIDNR=255 ; MODNR=11 ;; - ao_20m) GRDNAME='arctic' ; GRDRES=0p33 ; GRIDNR=255 ; MODNR=11 ;; - so_20m) GRDNAME='antarc' ; GRDRES=0p33 ; GRIDNR=255 ; MODNR=11 ;; - glo_15mxt) GRDNAME='global' ; GRDRES=0p25 ; GRIDNR=255 ; MODNR=11 ;; - reg025) GRDNAME='global' ; GRDRES=0p25 ; GRIDNR=255 ; MODNR=11 ;; - glo_025) GRDNAME='global' ; GRDRES=0p25 ; GRIDNR=255 ; MODNR=11 ;; - glo_100) GRDNAME='global' ; GRDRES=1p00 ; GRIDNR=255 ; MODNR=11 ;; - glo_200) GRDNAME='global' ; GRDRES=2p00 ; GRIDNR=255 ; MODNR=11 ;; - glo_500) GRDNAME='global' ; GRDRES=5p00 ; GRIDNR=255 ; MODNR=11 ;; - gwes_30m) GRDNAME='global' ; GRDRES=0p50 ; GRIDNR=255 ; MODNR=10 ;; - esac - echo "${USHgfs}/wave_grib2_sbs.sh $grdID $GRIDNR $MODNR $ymdh $fhr $GRDNAME $GRDRES $gribFL > grib_$grdID.out 2>&1" >> ${fcmdnow} - done - fi + process_grdID "${grdID}" + echo "${USHgfs}/wave_grib2_sbs.sh $grdID $GRIDNR $MODNR $ymdh $fhr $GRDREGION $GRDRES $gribFL > grib_$grdID.out 2>&1" >> ${fcmdigrd}.${nigrd} + fi + echo "${GRIBDATA}/${fcmdigrd}.${nigrd}" >> ${fcmdnow} + chmod 744 ${fcmdigrd}.${nigrd} + 
nigrd=$((nigrd+1)) + done + fi + if [ "$DOGRB_WAV" = 'YES' ] + then + for grdID in ${wavepostGRD} # First concatenate grib files for sbs grids + do + gribFL=\'$(echo ${OUTPARS_WAV})\' + process_grdID "${grdID}" + echo "${USHgfs}/wave_grib2_sbs.sh $grdID $GRIDNR $MODNR $ymdh $fhr $GRDREGION $GRDRES $gribFL > grib_$grdID.out 2>&1" >> ${fcmdnow} + done fi if [ ${CFP_MP:-"NO"} = "YES" ]; then @@ -377,15 +343,14 @@ source "${USHgfs}/preamble.sh" rm -f out_grd.* # Remove large binary grid output files cd $DATA - - - if [ "$fhr" = "$fhrg" ] - then + + # Check if grib2 file created ENSTAG="" + com_varname="COMOUT_WAVE_GRID_${GRDREGION}_${GRDRES}" + com_dir=${!com_varname} if [ ${waveMEMB} ]; then ENSTAG=".${membTAG}${waveMEMB}" ; fi - gribchk="${RUN}wave.${cycle}${ENSTAG}.${GRDNAME}.${GRDRES}.f${FH3}.grib2" + gribchk="${RUN}wave.${cycle}${ENSTAG}.${GRDREGION}.${GRDRES}.f${FH3}.grib2" - if [ ! -s ${COMOUT_WAVE_GRID}/${gribchk} ]; then + if [ ! -s ${com_dir}/${gribchk} ]; then set +x echo ' ' echo '********************************************' @@ -397,7 +362,6 @@ source "${USHgfs}/preamble.sh" err=5; export err;${errchk} exit "$err" fi - fi # --------------------------------------------------------------------------- # # 7. Ending output diff --git a/scripts/exgfs_wave_prdgen_gridded.sh b/scripts/exgfs_wave_prdgen_gridded.sh index 8fd6b5cc76..4e74d08e79 100755 --- a/scripts/exgfs_wave_prdgen_gridded.sh +++ b/scripts/exgfs_wave_prdgen_gridded.sh @@ -25,6 +25,7 @@ # 0. Preparations source "${USHgfs}/preamble.sh" +source "${USHgfs}/wave_domain_grid.sh" # 0.a Basic modes of operation @@ -48,7 +49,7 @@ source "${USHgfs}/preamble.sh" # Input grid grid_in="${waveinterpGRD:-glo_15mxt}" # Output grids -grids=${grids:-ao_9km at_10m ep_10m wc_10m glo_30m} +grids=${GEMPAK_GRIDS:-ak_10m at_10m ep_10m wc_10m glo_30m} # export grids=${wavepostGRD} maxtries=${maxtries:-720} # 0.b Date and time stuff @@ -88,33 +89,19 @@ grids=${grids:-ao_9km at_10m ep_10m wc_10m glo_30m} # 1.a Grib file (AWIPS and FAX charts) # Get input grid # TODO flesh this out with additional input grids if needed - case ${grid_in} in - glo_200) - grdIDin='global.2p00' ;; - glo_15mxt) - grdIDin='global.0p25' ;; - *) - echo "FATAL ERROR Unrecognized input grid ${grid_in}" - exit 2;; - esac + process_grdID "${grid_in}" + grdIDin=${grdNAME} fhcnt=${fstart} while [[ "${fhcnt}" -le "${FHMAX_WAV}" ]]; do fhr=$(printf "%03d" "${fhcnt}") for grdOut in ${grids}; do - case ${grdOut} in - ao_9km) grdID='arctic.9km' ;; - at_10m) grdID='atlocn.0p16' ;; - ep_10m) grdID='epacif.0p16' ;; - wc_10m) grdID='wcoast.0p16' ;; -# glo_30m) grdID='global.0p25' ;; - glo_30m) grdID='global.0p50' ;; - ak_10m) grdID='alaska.0p16' ;; - *) grdID= ;; - esac - # + process_grdID "${grdOut}" + grdIDin=${grdNAME} + com_varname="COMIN_WAVE_GRID_${GRDREGION}_${GRDRES}" + com_dir="${!com_varname}" - GRIBIN="${COMIN_WAVE_GRID}/${RUNwave}.${cycle}.${grdIDin}.f${fhr}.grib2" + GRIBIN="${com_dir}/${RUNwave}.${cycle}.${grdIDin}.f${fhr}.grib2" GRIBIN_chk="${GRIBIN}.idx" sleep_interval=5 max_tries=1000 diff --git a/scripts/exglobal_archive.py b/scripts/exglobal_archive.py index df49704e06..f64db172ac 100755 --- a/scripts/exglobal_archive.py +++ b/scripts/exglobal_archive.py @@ -40,7 +40,7 @@ def main(): 'NMEM_ENS', 'DO_JEDIATMVAR', 'DO_VRFY_OCEANDA', 'FHMAX_FITS', 'waveGRD', 'IAUFHRS', 'DO_FIT2OBS', 'NET', 'FHOUT_HF_GFS', 'FHMAX_HF_GFS', 'REPLAY_ICS', 'OFFSET_START_HOUR', 'ARCH_EXPDIR', 'EXPDIR', 'ARCH_EXPDIR_FREQ', 'ARCH_HASHES', - 'ARCH_DIFFS', 'SDATE', 'EDATE', 'HOMEgfs', 'DO_GEMPAK'] + 
'ARCH_DIFFS', 'SDATE', 'EDATE', 'HOMEgfs', 'DO_GEMPAK', 'WAVE_OUT_GRIDS'] archive_dict = AttrDict() for key in keys: diff --git a/ush/wave_domain_grid.sh b/ush/wave_domain_grid.sh new file mode 100644 index 0000000000..bf393c5513 --- /dev/null +++ b/ush/wave_domain_grid.sh @@ -0,0 +1,43 @@ +#! /usr/bin/env bash + +################################################################################ +## UNIX Script Documentation Block +## Script name: wave_domain_grid.sh +## Script description: provides the wave grid specific values that +## are needed for the wave related jobs +####################### +# Main body starts here +####################### + +process_grdID() { + grdID=$1 + case ${grdID} in + glo_10m) GRDREGION='global' ; GRDRES=0p16 ; GRIDNR=255 ; MODNR=11 ;; + glo_15mxt) GRDREGION='global' ; GRDRES=0p25 ; GRIDNR=255 ; MODNR=11 ;; + glo_30mxt) GRDREGION='global' ; GRDRES=0p50 ; GRIDNR=255 ; MODNR=11 ;; + glo_30m) GRDREGION='global' ; GRDRES=0p50 ; GRIDNR=255 ; MODNR=11 ;; + glo_025) GRDREGION='global' ; GRDRES=0p25 ; GRIDNR=255 ; MODNR=11 ;; + glo_100) GRDREGION='global' ; GRDRES=1p00 ; GRIDNR=255 ; MODNR=11 ;; + glo_200) GRDREGION='global' ; GRDRES=2p00 ; GRIDNR=255 ; MODNR=11 ;; + glo_500) GRDREGION='global' ; GRDRES=5p00 ; GRIDNR=255 ; MODNR=11 ;; + at_10m) GRDREGION='atlocn' ; GRDRES=0p16 ; GRIDNR=255 ; MODNR=11 ;; + ep_10m) GRDREGION='epacif' ; GRDRES=0p16 ; GRIDNR=255 ; MODNR=11 ;; + wc_10m) GRDREGION='wcoast' ; GRDRES=0p16 ; GRIDNR=255 ; MODNR=11 ;; + ak_10m) GRDREGION='alaska' ; GRDRES=0p16 ; GRIDNR=255 ; MODNR=11 ;; + aoc_9km) GRDREGION='arctic' ; GRDRES=9km ; GRIDNR=255 ; MODNR=11 ;; + ant_9km) GRDREGION='antarc' ; GRDRES=9km ; GRIDNR=255 ; MODNR=11 ;; + gnh_10m) GRDREGION='global' ; GRDRES=0p16 ; GRIDNR=255 ; MODNR=11 ;; + gsh_15m) GRDREGION='gsouth' ; GRDRES=0p25 ; GRIDNR=255 ; MODNR=11 ;; + ao_20m) GRDREGION='arctic' ; GRDRES=0p33 ; GRIDNR=255 ; MODNR=11 ;; + so_20m) GRDREGION='antarc' ; GRDRES=0p33 ; GRIDNR=255 ; MODNR=11 ;; + reg025) GRDREGION='global' ; GRDRES=0p25 ; GRIDNR=255 ; MODNR=11 ;; + gwes_30m) GRDREGION='global' ; GRDRES=0p50 ; GRIDNR=255 ; MODNR=10 ;; + *) + echo "FATAL ERROR: No grid specific wave config values exist for ${grdID}. Aborting." 
+ exit 1 ;; + esac + grdNAME="${GRDREGION}.${GRDRES}" + echo "grdNAME=${grdNAME}" + echo "GRIDNR=${GRIDNR}" + echo "MODNR=${MODNR}" +} diff --git a/ush/wave_extractvars.sh b/ush/wave_extractvars.sh index 32ee44986b..cad65f777d 100755 --- a/ush/wave_extractvars.sh +++ b/ush/wave_extractvars.sh @@ -10,6 +10,11 @@ ####################### source "${USHgfs}/preamble.sh" +source "${USHgfs}/wave_domain_grid.sh" + +process_grdID "${waveGRD}" +com_varname="COMIN_WAVE_GRID_${GRDREGION}_${GRDRES}" +com_dir=${!com_varname} subdata=${1} @@ -18,7 +23,7 @@ subdata=${1} for (( nh = FHOUT_WAV_EXTRACT; nh <= FHMAX_WAV; nh = nh + FHOUT_WAV_EXTRACT )); do fnh=$(printf "%3.3d" "${nh}") - infile=${COMIN_WAVE_GRID}/${RUN}wave.t${cyc}z.global.${wavres}.f${fnh}.grib2 + infile=${com_dir}/${RUN}wave.t${cyc}z.global.${wavres}.f${fnh}.grib2 outfile=${subdata}/${RUN}wave.t${cyc}z.global.${wavres}.f${fnh}.grib2 rm -f "${outfile}" # Remove outfile if it already exists before extraction diff --git a/ush/wave_grib2_sbs.sh b/ush/wave_grib2_sbs.sh index 22eb361a0d..60e8d2a337 100755 --- a/ush/wave_grib2_sbs.sh +++ b/ush/wave_grib2_sbs.sh @@ -71,8 +71,19 @@ ENSTAG="" if [[ -n ${waveMEMB} ]]; then ENSTAG=".${membTAG}${waveMEMB}" ; fi outfile="${WAV_MOD_TAG}.${cycle}${ENSTAG}.${grdnam}.${grdres}.f${FH3}.grib2" +# Create the COM directory variable name +com_varname="COMOUT_WAVE_GRID_${grdnam}_${grdres}" +com_dir="${!com_varname}" + +# Check if the COM directory exists, create it if necessary +if [[ ! -d "${com_dir}" ]]; then + mkdir -p "${com_dir}" + echo "Directory ${com_dir} created." +else + echo "Directory ${com_dir} already exists." +fi # Only create file if not present in COM -if [[ ! -s "${COMOUT_WAVE_GRID}/${outfile}.idx" ]]; then +if [[ ! -s "${com_dir}/${outfile}.idx" ]]; then set +x echo ' ' @@ -83,7 +94,7 @@ if [[ ! -s "${COMOUT_WAVE_GRID}/${outfile}.idx" ]]; then set_trace if [[ -z "${PDY}" ]] || [[ -z ${cyc} ]] || [[ -z "${cycle}" ]] || [[ -z "${EXECgfs}" ]] || \ - [[ -z "${COMOUT_WAVE_GRID}" ]] || [[ -z "${WAV_MOD_TAG}" ]] || [[ -z "${gribflags}" ]] || \ + [[ -z "${com_dir}" ]] || [[ -z "${WAV_MOD_TAG}" ]] || [[ -z "${gribflags}" ]] || \ [[ -z "${GRIDNR}" ]] || [[ -z "${MODNR}" ]] || \ [[ -z "${SENDDBN}" ]]; then set +x @@ -158,11 +169,11 @@ if [[ ! -s "${COMOUT_WAVE_GRID}/${outfile}.idx" ]]; then fi if (( fhr > 0 )); then - ${WGRIB2} gribfile -set_date "${PDY}${cyc}" -set_ftime "${fhr} hour fcst" -grib "${COMOUT_WAVE_GRID}/${outfile}" + ${WGRIB2} gribfile -set_date "${PDY}${cyc}" -set_ftime "${fhr} hour fcst" -grib "${com_dir}/${outfile}" err=$? else ${WGRIB2} gribfile -set_date "${PDY}${cyc}" -set_ftime "${fhr} hour fcst" \ - -set table_1.4 1 -set table_1.2 1 -grib "${COMOUT_WAVE_GRID}/${outfile}" + -set table_1.4 1 -set table_1.2 1 -grib "${com_dir}/${outfile}" err=$? fi @@ -178,7 +189,7 @@ if [[ ! -s "${COMOUT_WAVE_GRID}/${outfile}.idx" ]]; then fi # Create index - ${WGRIB2} -s "${COMOUT_WAVE_GRID}/${outfile}" > "${COMOUT_WAVE_GRID}/${outfile}.idx" + ${WGRIB2} -s "${com_dir}/${outfile}" > "${com_dir}/${outfile}.idx" # Create grib2 subgrid is this is the source grid if [[ "${grdID}" = "${WAV_SUBGRBSRC}" ]]; then @@ -187,14 +198,14 @@ if [[ ! 
-s "${COMOUT_WAVE_GRID}/${outfile}.idx" ]]; then subgrbnam=$(echo ${!subgrb} | cut -d " " -f 21) subgrbres=$(echo ${!subgrb} | cut -d " " -f 22) subfnam="${WAV_MOD_TAG}.${cycle}${ENSTAG}.${subgrbnam}.${subgrbres}.f${FH3}.grib2" - ${COPYGB2} -g "${subgrbref}" -i0 -x "${COMOUT_WAVE_GRID}/${outfile}" "${COMOUT_WAVE_GRID}/${subfnam}" - ${WGRIB2} -s "${COMOUT_WAVE_GRID}/${subfnam}" > "${COMOUT_WAVE_GRID}/${subfnam}.idx" + ${COPYGB2} -g "${subgrbref}" -i0 -x "${com_dir}/${outfile}" "${com_dir}/${subfnam}" + ${WGRIB2} -s "${com_dir}/${subfnam}" > "${com_dir}/${subfnam}.idx" done fi # 1.e Save in /com - if [[ ! -s "${COMOUT_WAVE_GRID}/${outfile}" ]]; then + if [[ ! -s "${com_dir}/${outfile}" ]]; then set +x echo ' ' echo '********************************************* ' @@ -206,7 +217,7 @@ if [[ ! -s "${COMOUT_WAVE_GRID}/${outfile}.idx" ]]; then set_trace exit 4 fi - if [[ ! -s "${COMOUT_WAVE_GRID}/${outfile}.idx" ]]; then + if [[ ! -s "${com_dir}/${outfile}.idx" ]]; then set +x echo ' ' echo '*************************************************** ' @@ -221,11 +232,11 @@ if [[ ! -s "${COMOUT_WAVE_GRID}/${outfile}.idx" ]]; then if [[ "${SENDDBN}" = 'YES' ]] && [[ ${outfile} != *global.0p50* ]]; then set +x - echo " Alerting GRIB file as ${COMOUT_WAVE_GRID}/${outfile}" - echo " Alerting GRIB index file as ${COMOUT_WAVE_GRID}/${outfile}.idx" + echo " Alerting GRIB file as ${com_dir}/${outfile}" + echo " Alerting GRIB index file as ${com_dir}/${outfile}.idx" set_trace - "${DBNROOT}/bin/dbn_alert" MODEL "${alertName}_WAVE_GB2" "${job}" "${COMOUT_WAVE_GRID}/${outfile}" - "${DBNROOT}/bin/dbn_alert" MODEL "${alertName}_WAVE_GB2_WIDX" "${job}" "${COMOUT_WAVE_GRID}/${outfile}.idx" + "${DBNROOT}/bin/dbn_alert" MODEL "${alertName}_WAVE_GB2" "${job}" "${com_dir}/${outfile}" + "${DBNROOT}/bin/dbn_alert" MODEL "${alertName}_WAVE_GB2_WIDX" "${job}" "${com_dir}/${outfile}.idx" else echo "${outfile} is global.0p50 or SENDDBN is NO, no alert sent" fi @@ -246,7 +257,7 @@ if [[ ! 
-s "${COMOUT_WAVE_GRID}/${outfile}.idx" ]]; then else set +x echo ' ' - echo " File ${COMOUT_WAVE_GRID}/${outfile} found, skipping generation process" + echo " File ${com_dir}/${outfile} found, skipping generation process" echo ' ' set_trace fi From 4ab8cf816d17a563d922ce8f1104c11c2fb9337f Mon Sep 17 00:00:00 2001 From: Rahul Mahajan Date: Tue, 21 Jan 2025 16:54:00 -0500 Subject: [PATCH 33/33] Add echgres as a dependency only for RUN=enkfgdas, not enkfgfs (#3246) This PR: - adds echgres as a dependency only for `RUN=enkfgdas`, not `enkfgfs` Resolves #3244 --- workflow/rocoto/gfs_tasks.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/workflow/rocoto/gfs_tasks.py b/workflow/rocoto/gfs_tasks.py index 768512ba39..08d260fe57 100644 --- a/workflow/rocoto/gfs_tasks.py +++ b/workflow/rocoto/gfs_tasks.py @@ -2919,12 +2919,14 @@ def earc(self): deps = [] if 'enkfgdas' in self.run: dep_dict = {'type': 'metatask', 'name': f'{self.run}_epmn'} - else: + deps.append(rocoto.add_dependency(dep_dict)) + dep_dict = {'type': 'task', 'name': f'{self.run}_echgres'} + deps.append(rocoto.add_dependency(dep_dict)) + dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) + else: # early cycle enkf run (enkfgfs) dep_dict = {'type': 'task', 'name': f'{self.run}_esfc'} - deps.append(rocoto.add_dependency(dep_dict)) - dep_dict = {'type': 'task', 'name': f'{self.run}_echgres'} - deps.append(rocoto.add_dependency(dep_dict)) - dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) + deps.append(rocoto.add_dependency(dep_dict)) + dependencies = rocoto.create_dependency(dep=deps) earcenvars = self.envars.copy() earcenvars.append(rocoto.create_envar(name='ENSGRP', value='#grp#'))
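To summarize the new branch behavior (an illustrative sketch, not code from this PR; the `earc_trigger` helper is hypothetical), the ensemble archive job now waits on different upstream jobs depending on the run:

```python
# Illustrative sketch of the earc dependency split introduced above.
def earc_trigger(run: str) -> str:
    if "enkfgdas" in run:
        # Late-cycle ensemble archive waits on the epmn metatask AND echgres.
        return f"and({run}_epmn, {run}_echgres)"
    # Early-cycle ensemble (enkfgfs) has no echgres job; wait on esfc only.
    return f"{run}_esfc"


assert earc_trigger("enkfgdas") == "and(enkfgdas_epmn, enkfgdas_echgres)"
assert earc_trigger("enkfgfs") == "enkfgfs_esfc"
```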