diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 3ab91f6be2..769994adfc 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -34,5 +34,5 @@ If your code changes are available on GitHub, please provide the repository. ### Build information Please describe: - 1. The machine you are running on (e.g. windows laptop, NCAR supercomputer Cheyenne). + 1. The machine you are running on (e.g. windows laptop, NSF NCAR supercomputer Derecho). 2. The compiler you are using (e.g. gnu, intel). diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 5ad6ff2d62..036388428e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -22,6 +22,13 @@ individual files. The changes are now listed with the most recent at the top. +**March 13 2024 :: Update WRF-DART scripts and bug template to Derecho; remove no-op routines in ensemble manager. Tag v11.3.1** + +- Updated the csh scripting templates used to run WRF-DART and WRF-DART tutorial from Cheyenne to Derecho +- Updated bug report template to use Derecho instead of Cheyenne +- Removed the following no-op routines from ensemble manager: prepare_to_write_to_vars, prepare_to_write_to_copies, + prepare_to_read_from_vars, prepare_to_read_from_copies, prepare_to_update_vars, prepare_to_update_copies + **March 12 2024 :: MITgcm/N-BLING with Compressed Staggered Grids. 
Tag v11.3.0** - The DART-MITgcm code now supports compressed grids, especially suited for areas like diff --git a/assimilation_code/modules/assimilation/assim_tools_mod.pf.f90 b/assimilation_code/modules/assimilation/assim_tools_mod.pf.f90 index 5892aa139c..9bab10ecd6 100644 --- a/assimilation_code/modules/assimilation/assim_tools_mod.pf.f90 +++ b/assimilation_code/modules/assimilation/assim_tools_mod.pf.f90 @@ -62,7 +62,7 @@ module assim_tools_mod use ensemble_manager_mod, only : ensemble_type, get_my_num_vars, get_my_vars, & compute_copy_mean_var, get_var_owner_index, & - prepare_to_update_copies, map_pe_to_task + map_pe_to_task use mpi_utilities_mod, only : my_task_id, broadcast_send, broadcast_recv, & sum_across_tasks, task_count, start_mpi_timer, & @@ -496,10 +496,6 @@ subroutine filter_assim(ens_handle, obs_ens_handle, obs_seq, keys, & my_state_loc( ens_handle%my_num_vars)) ! end alloc -! we are going to read/write the copies array -call prepare_to_update_copies(ens_handle) -call prepare_to_update_copies(obs_ens_handle) - ! Initialize assim_tools_module if needed if (.not. 
module_initialized) call assim_tools_init() diff --git a/assimilation_code/modules/assimilation/filter_mod.dopplerfold.f90 b/assimilation_code/modules/assimilation/filter_mod.dopplerfold.f90 index 9041bfc221..e9cf2146df 100644 --- a/assimilation_code/modules/assimilation/filter_mod.dopplerfold.f90 +++ b/assimilation_code/modules/assimilation/filter_mod.dopplerfold.f90 @@ -46,11 +46,10 @@ module filter_mod compute_copy_mean, compute_copy_mean_sd, & compute_copy_mean_var, duplicate_ens, get_copy_owner_index, & get_ensemble_time, set_ensemble_time, broadcast_copy, & - map_pe_to_task, prepare_to_update_copies, & - copies_in_window, set_num_extra_copies, get_allow_transpose, & - all_copies_to_all_vars, allocate_single_copy, allocate_vars, & - get_single_copy, put_single_copy, deallocate_single_copy, & - print_ens_handle + map_pe_to_task, copies_in_window, set_num_extra_copies, & + get_allow_transpose, all_copies_to_all_vars, & + allocate_single_copy, allocate_vars, get_single_copy, & + put_single_copy, deallocate_single_copy, print_ens_handle use adaptive_inflate_mod, only : do_ss_inflate, mean_from_restart, sd_from_restart, & inflate_ens, adaptive_inflate_init, & @@ -806,7 +805,6 @@ subroutine filter_main() call trace_message('Before prior inflation damping and prep') if (inf_damping(PRIOR_INF) /= 1.0_r8) then - call prepare_to_update_copies(state_ens_handle) state_ens_handle%copies(PRIOR_INF_COPY, :) = 1.0_r8 + & inf_damping(PRIOR_INF) * (state_ens_handle%copies(PRIOR_INF_COPY, :) - 1.0_r8) endif @@ -907,7 +905,6 @@ subroutine filter_main() call trace_message('Before posterior inflation damping') if (inf_damping(POSTERIOR_INF) /= 1.0_r8) then - call prepare_to_update_copies(state_ens_handle) state_ens_handle%copies(POST_INF_COPY, :) = 1.0_r8 + & inf_damping(POSTERIOR_INF) * (state_ens_handle%copies(POST_INF_COPY, :) - 1.0_r8) endif @@ -1549,9 +1546,6 @@ subroutine filter_ensemble_inflate(ens_handle, inflate_copy, inflate, ENS_MEAN_C integer :: j, group, grp_bot, 
grp_top, grp_size -! Assumes that the ensemble is copy complete -call prepare_to_update_copies(ens_handle) - ! Inflate each group separately; Divide ensemble into num_groups groups grp_size = ens_size / num_groups @@ -2827,8 +2821,6 @@ subroutine update_observations_radar(obs_ens_handle, ens_size, seq, keys, prior_ ! for quiet execution, set it to false. verbose = .true. -call prepare_to_update_copies(obs_ens_handle) - do j = 1, obs_ens_handle%my_num_vars ! get the key number associated with each of my subset of obs ! then get the obs and extract info from it. diff --git a/assimilation_code/modules/assimilation/filter_mod.f90 b/assimilation_code/modules/assimilation/filter_mod.f90 index 75d0560de6..0971d7821e 100644 --- a/assimilation_code/modules/assimilation/filter_mod.f90 +++ b/assimilation_code/modules/assimilation/filter_mod.f90 @@ -46,11 +46,10 @@ module filter_mod compute_copy_mean, compute_copy_mean_sd, & compute_copy_mean_var, duplicate_ens, get_copy_owner_index, & get_ensemble_time, set_ensemble_time, broadcast_copy, & - map_pe_to_task, prepare_to_update_copies, & - copies_in_window, set_num_extra_copies, get_allow_transpose, & - all_copies_to_all_vars, allocate_single_copy, allocate_vars, & - get_single_copy, put_single_copy, deallocate_single_copy, & - print_ens_handle + map_pe_to_task, copies_in_window, set_num_extra_copies, & + get_allow_transpose, all_copies_to_all_vars, & + allocate_single_copy, allocate_vars, get_single_copy, & + put_single_copy, deallocate_single_copy, print_ens_handle use adaptive_inflate_mod, only : do_ss_inflate, mean_from_restart, sd_from_restart, & inflate_ens, adaptive_inflate_init, & @@ -809,7 +808,6 @@ subroutine filter_main() call trace_message('Before prior inflation damping and prep') if (inf_damping(PRIOR_INF) /= 1.0_r8) then - call prepare_to_update_copies(state_ens_handle) state_ens_handle%copies(PRIOR_INF_COPY, :) = 1.0_r8 + & inf_damping(PRIOR_INF) * (state_ens_handle%copies(PRIOR_INF_COPY, :) - 1.0_r8) endif @@ 
-910,7 +908,6 @@ subroutine filter_main() call trace_message('Before posterior inflation damping') if (inf_damping(POSTERIOR_INF) /= 1.0_r8) then - call prepare_to_update_copies(state_ens_handle) state_ens_handle%copies(POST_INF_COPY, :) = 1.0_r8 + & inf_damping(POSTERIOR_INF) * (state_ens_handle%copies(POST_INF_COPY, :) - 1.0_r8) endif @@ -1566,9 +1563,6 @@ subroutine filter_ensemble_inflate(ens_handle, inflate_copy, inflate, ENS_MEAN_C real(r8) :: lower_bound, upper_bound integer :: dist_type -! Assumes that the ensemble is copy complete -call prepare_to_update_copies(ens_handle) - ! Inflate each group separately; Divide ensemble into num_groups groups grp_size = ens_size / num_groups diff --git a/assimilation_code/modules/assimilation/obs_model_mod.f90 b/assimilation_code/modules/assimilation/obs_model_mod.f90 index 9cb8618995..0f04dd1db2 100644 --- a/assimilation_code/modules/assimilation/obs_model_mod.f90 +++ b/assimilation_code/modules/assimilation/obs_model_mod.f90 @@ -20,8 +20,8 @@ module obs_model_mod operator(/=), operator(>), operator(-), & operator(/), operator(+), operator(<), operator(==), & operator(<=), operator(>=) -use ensemble_manager_mod, only : get_ensemble_time, ensemble_type, map_task_to_pe, & - prepare_to_update_vars +use ensemble_manager_mod, only : get_ensemble_time, ensemble_type, map_task_to_pe + use mpi_utilities_mod, only : my_task_id, task_sync, block_task, & sum_across_tasks, shell_execute, my_task_id use io_filenames_mod, only : file_info_type @@ -348,8 +348,6 @@ subroutine advance_state(ens_handle, ens_size, target_time, async, adv_ens_comma ! Ok, this task does need to advance something. need_advance = 1 - call prepare_to_update_vars(ens_handle) - ! Increment number of ensemble member copies I have. 
my_num_state_copies = my_num_state_copies + 1 diff --git a/assimilation_code/modules/observations/forward_operator_mod.f90 b/assimilation_code/modules/observations/forward_operator_mod.f90 index a12e45cb1d..52e07ec444 100644 --- a/assimilation_code/modules/observations/forward_operator_mod.f90 +++ b/assimilation_code/modules/observations/forward_operator_mod.f90 @@ -30,8 +30,6 @@ module forward_operator_mod use obs_kind_mod, only : assimilate_this_type_of_obs, evaluate_this_type_of_obs use ensemble_manager_mod, only : ensemble_type, compute_copy_mean_var, & - prepare_to_read_from_vars, & - prepare_to_write_to_vars, & get_my_num_copies, copies_in_window, & get_allow_transpose, all_vars_to_all_copies, & all_copies_to_all_vars, allocate_single_copy, & @@ -127,11 +125,6 @@ subroutine get_obs_ens_distrib_state(ens_handle, obs_fwd_op_ens_handle, & istatus = 999123 expected_obs = MISSING_R8 -! FIXME: these no longer do anything? -! call prepare_to_write_to_vars(obs_fwd_op_ens_handle) -! call prepare_to_write_to_vars(qc_ens_handle) -! call prepare_to_read_from_vars(ens_handle) - ! 
Set up access to the state call create_state_window(ens_handle, obs_fwd_op_ens_handle, qc_ens_handle) diff --git a/assimilation_code/modules/utilities/ensemble_manager_mod.f90 b/assimilation_code/modules/utilities/ensemble_manager_mod.f90 index df5726ec22..11e1258e88 100644 --- a/assimilation_code/modules/utilities/ensemble_manager_mod.f90 +++ b/assimilation_code/modules/utilities/ensemble_manager_mod.f90 @@ -37,9 +37,7 @@ module ensemble_manager_mod get_copy, put_copy, all_vars_to_all_copies, & all_copies_to_all_vars, allocate_vars, deallocate_vars, & compute_copy_mean_var, get_copy_owner_index, set_ensemble_time, & - broadcast_copy, prepare_to_write_to_vars, prepare_to_write_to_copies, & - prepare_to_read_from_vars, prepare_to_read_from_copies, prepare_to_update_vars, & - prepare_to_update_copies, print_ens_handle, set_current_time, & + broadcast_copy, print_ens_handle, set_current_time, & map_task_to_pe, map_pe_to_task, get_current_time, & allocate_single_copy, put_single_copy, get_single_copy, & deallocate_single_copy @@ -66,7 +64,6 @@ module ensemble_manager_mod ! Time is only related to var complete type(time_type), allocatable :: time(:) integer :: distribution_type - integer :: valid ! copies modified last, vars modified last, both same integer :: id_num integer, allocatable :: task_to_pe_list(:), pe_to_task_list(:) ! List of tasks ! Flexible my_pe, layout_type which allows different task layouts for different ensemble handles @@ -83,13 +80,6 @@ module ensemble_manager_mod !PAR some way, either allocating or multiple addressing, to use same chunk of storage !PAR for both copy and var complete representations. -! track if copies modified last, vars modified last, both are in sync -! (and therefore both valid to be used r/o), or unknown. -integer, parameter :: VALID_UNKNOWN = -1 -integer, parameter :: VALID_BOTH = 0 ! vars & copies have same data -integer, parameter :: VALID_VARS = 1 ! vars(:,:) modified last -integer, parameter :: VALID_COPIES = 2 ! 
copies(:,:) modified last - ! unique counter per ensemble handle integer :: global_counter = 1 @@ -237,9 +227,6 @@ subroutine init_ensemble_manager(ens_handle, num_copies, & source, text2=msgstring) endif -! initially no data -ens_handle%valid = VALID_BOTH - if(debug .and. my_task_id()==0) then print*, 'pe_to_task_list', ens_handle%pe_to_task_list print*, 'task_to_pe_list', ens_handle%task_to_pe_list @@ -278,11 +265,6 @@ subroutine get_copy(receiving_pe, ens_handle, copy, vars, mtime) integer :: owner, owners_index -! Error checking -if (ens_handle%valid /= VALID_VARS .and. ens_handle%valid /= VALID_BOTH) then - call error_handler(E_ERR, 'get_copy', 'last access not var-complete', source) -endif - ! Verify that requested copy exists if(copy < 1 .or. copy > ens_handle%num_copies) then write(msgstring, *) 'Requested copy: ', copy, ' is > maximum copy: ', ens_handle%num_copies @@ -348,11 +330,6 @@ subroutine put_copy(sending_pe, ens_handle, copy, vars, mtime) integer :: owner, owners_index -! Error checking -if (ens_handle%valid /= VALID_VARS .and. ens_handle%valid /= VALID_BOTH) then - call error_handler(E_ERR, 'put_copy', 'last access not var-complete', source) -endif - if(copy < 1 .or. copy > ens_handle%num_copies) then write(msgstring, *) 'Requested copy: ', copy, ' is > maximum copy: ', ens_handle%num_copies call error_handler(E_ERR,'put_copy', msgstring, source) @@ -392,8 +369,6 @@ subroutine put_copy(sending_pe, ens_handle, copy, vars, mtime) endif endif -ens_handle%valid = VALID_VARS - end subroutine put_copy !----------------------------------------------------------------- @@ -411,11 +386,6 @@ subroutine broadcast_copy(ens_handle, copy, arraydata) integer :: owner, owners_index -! Error checking -if (ens_handle%valid /= VALID_VARS .and. ens_handle%valid /= VALID_BOTH) then - call error_handler(E_ERR, 'broadcast_copy', 'last access not var-complete', source) -endif - if(copy < 1 .or. 
copy > ens_handle%num_copies) then write(msgstring, *) 'Requested copy: ', copy, ' is > maximum copy: ', ens_handle%num_copies call error_handler(E_ERR,'broadcast_copy', msgstring, source) @@ -442,94 +412,6 @@ end subroutine broadcast_copy !----------------------------------------------------------------- -subroutine prepare_to_write_to_vars(ens_handle) - -! Warn ens manager that we're going to directly update the %vars array - -type(ensemble_type), intent(inout) :: ens_handle - -!ens_handle%valid = VALID_VARS - -end subroutine prepare_to_write_to_vars - -!----------------------------------------------------------------- - -subroutine prepare_to_write_to_copies(ens_handle) - -! Warn ens manager that we're going to directly update the %copies array - -type(ensemble_type), intent(inout) :: ens_handle - -!ens_handle%valid = VALID_COPIES - -end subroutine prepare_to_write_to_copies - -!----------------------------------------------------------------- - -subroutine prepare_to_read_from_vars(ens_handle) - -! Check to be sure that the vars array is current - -type(ensemble_type), intent(in) :: ens_handle - -!if (ens_handle%valid /= VALID_VARS .and. ens_handle%valid /= VALID_BOTH) then -! call error_handler(E_ERR, 'prepare_to_read_from_vars', & - ! 'last access not var-complete', source) -!endif - -end subroutine prepare_to_read_from_vars - -!----------------------------------------------------------------- - -subroutine prepare_to_read_from_copies(ens_handle) - -! Check to be sure that the copies array is current - -type(ensemble_type), intent(in) :: ens_handle - -!if (ens_handle%valid /= VALID_COPIES .and. ens_handle%valid /= VALID_BOTH) then -! call error_handler(E_ERR, 'prepare_to_read_from_copies', & -! 'last access not copy-complete', source) -!endif - -end subroutine prepare_to_read_from_copies - -!----------------------------------------------------------------- - -subroutine prepare_to_update_vars(ens_handle) - -! 
We need read/write access, so it has to start valid for vars or both, -! and then is going to be vars only going out. - -type(ensemble_type), intent(inout) :: ens_handle - -!if (ens_handle%valid /= VALID_VARS .and. ens_handle%valid /= VALID_BOTH) then -! call error_handler(E_ERR, 'prepare_to_update_vars', & - ! 'last access not var-complete', source) -!endif -!ens_handle%valid = VALID_VARS - -end subroutine prepare_to_update_vars - -!----------------------------------------------------------------- - -subroutine prepare_to_update_copies(ens_handle) - -! We need read/write access, so it has to start valid for copies or both, -! and then is going to be copies only going out. - -type(ensemble_type), intent(inout) :: ens_handle - -!if (ens_handle%valid /= VALID_COPIES .and. ens_handle%valid /= VALID_BOTH) then -! call error_handler(E_ERR, 'prepare_to_update_copies', & -! 'last access not copy-complete', source) -!endif -!ens_handle%valid = VALID_COPIES - -end subroutine prepare_to_update_copies - -!----------------------------------------------------------------- - subroutine set_ensemble_time(ens_handle, indx, mtime) ! Sets the time of an ensemble member indexed by local storage on this pe. @@ -596,12 +478,6 @@ subroutine duplicate_ens(ens1, ens2, duplicate_time) ! If duplicate_time is true, also copies the time information from ens1 to ens2. ! If duplicate_time is false, the times in ens2 are left unchanged. -! Error checking -if (ens1%valid /= VALID_VARS .and. ens1%valid /= VALID_BOTH) then - call error_handler(E_ERR, 'duplicate_ens', & - 'last access not var-complete for source ensemble', source) -endif - ! Check to make sure that the ensembles are compatible if(ens1%num_copies /= ens2%num_copies) then write(msgstring, *) 'num_copies ', ens1%num_copies, ' and ', ens2%num_copies, & @@ -622,8 +498,6 @@ subroutine duplicate_ens(ens1, ens2, duplicate_time) ! Duplicate each copy that is stored locally on this process. ens2%vars = ens1%vars -ens2%valid = VALID_VARS - ! 
Duplicate time if requested if(duplicate_time) ens2%time = ens1%time @@ -1056,25 +930,6 @@ subroutine all_vars_to_all_copies(ens_handle, label) call timestamp_message('vars_to_copies start: '//label, alltasks=.true.) endif -! Error checking, but can't return early in case only some of the -! MPI tasks need to transpose. Only if all N tasks say this is an -! unneeded transpose can we skip it. -!if (ens_handle%valid == VALID_BOTH) then -! if (flag_unneeded_transposes) then -! write(msgstring, *) 'task ', my_task_id(), ' ens_handle ', ens_handle%id_num -! call error_handler(E_MSG, 'all_vars_to_all_copies', & -! 'vars & copies both valid, transpose not needed for this task', & -! source, text2=msgstring) -! endif -!else if (ens_handle%valid /= VALID_VARS) then -! write(msgstring, *) 'ens_handle ', ens_handle%id_num -! call error_handler(E_ERR, 'all_vars_to_all_copies', & -! 'last access not var-complete', source, & -! text2=msgstring) -!endif - -ens_handle%valid = VALID_BOTH - ! Accelerated version for single process if(num_pes == 1) then ens_handle%copies = transpose(ens_handle%vars) @@ -1232,25 +1087,6 @@ subroutine all_copies_to_all_vars(ens_handle, label) call timestamp_message('copies_to_vars start: '//label, alltasks=.true.) endif -! Error checking, but can't return early in case only some of the -! MPI tasks need to transpose. Only if all N tasks say this is an -! unneeded transpose can we skip it. -!if (ens_handle%valid == VALID_BOTH) then -! if (flag_unneeded_transposes) then -! write(msgstring, *) 'task ', my_task_id(), ' ens_handle ', ens_handle%id_num -! call error_handler(E_MSG, 'all_copies_to_all_vars', & -! 'vars & copies both valid, transpose not needed for this task', & -! source, text2=msgstring) -! endif -!else if (ens_handle%valid /= VALID_COPIES) then -! write(msgstring, *) 'ens_handle ', ens_handle%id_num -! call error_handler(E_ERR, 'all_copies_to_all_vars', & -! 'last access not copy-complete', source, & -! 
text2=msgstring) -!endif - -ens_handle%valid = VALID_BOTH - ! Accelerated version for single process if(num_pes == 1) then ens_handle%vars = transpose(ens_handle%copies) @@ -1416,12 +1252,6 @@ subroutine compute_copy_mean(ens_handle, start_copy, end_copy, mean_copy) ! Should check to make sure that start, end and mean are all legal -! Error checking -if (ens_handle%valid /= VALID_COPIES .and. ens_handle%valid /= VALID_BOTH) then - call error_handler(E_ERR, 'compute_copy_mean', & - 'last access not copy-complete', source) -endif - num_copies = end_copy - start_copy + 1 MYLOOP : do i = 1, ens_handle%my_num_vars @@ -1432,8 +1262,6 @@ subroutine compute_copy_mean(ens_handle, start_copy, end_copy, mean_copy) endif end do MYLOOP -ens_handle%valid = VALID_COPIES - end subroutine compute_copy_mean !-------------------------------------------------------------------------------- @@ -1450,12 +1278,6 @@ subroutine compute_copy_mean_sd(ens_handle, start_copy, end_copy, mean_copy, sd_ ! Should check to make sure that start, end, mean and sd are all legal copies -! Error checking -!if (ens_handle%valid /= VALID_COPIES .and. ens_handle%valid /= VALID_BOTH) then -! call error_handler(E_ERR, 'compute_copy_mean_sd', & -! 'last access not copy-complete', source) -!endif - num_copies = end_copy - start_copy + 1 MYLOOP : do i = 1, ens_handle%my_num_vars @@ -1475,8 +1297,6 @@ subroutine compute_copy_mean_sd(ens_handle, start_copy, end_copy, mean_copy, sd_ end do MYLOOP -ens_handle%valid = VALID_COPIES - end subroutine compute_copy_mean_sd !-------------------------------------------------------------------------------- @@ -1494,12 +1314,6 @@ subroutine compute_copy_mean_var(ens_handle, start_copy, end_copy, mean_copy, va ! Should check to make sure that start, end, mean and var are all legal copies -! Error checking -if (ens_handle%valid /= VALID_COPIES .and. 
ens_handle%valid /= VALID_BOTH) then - call error_handler(E_ERR, 'compute_copy_mean_var', & - 'last access not copy-complete', source) -endif - num_copies = end_copy - start_copy + 1 MYLOOP : do i = 1, ens_handle%my_num_vars @@ -1517,8 +1331,6 @@ subroutine compute_copy_mean_var(ens_handle, start_copy, end_copy, mean_copy, va endif end do MYLOOP -ens_handle%valid = VALID_COPIES - end subroutine compute_copy_mean_var !-------------------------------------------------------------------------------- @@ -1612,8 +1424,6 @@ subroutine print_ens_handle(ens_handle, force, label, contents, limit) call error_handler(E_MSG, 'ensemble handle: ', msgstring, source) write(msgstring, *) 'number of my_vars : ', ens_handle%my_num_vars call error_handler(E_MSG, 'ensemble handle: ', msgstring, source) -write(msgstring, *) 'valid : ', ens_handle%valid -call error_handler(E_MSG, 'ensemble handle: ', msgstring, source) write(msgstring, *) 'distribution_type : ', ens_handle%distribution_type call error_handler(E_MSG, 'ensemble handle: ', msgstring, source) write(msgstring, *) 'my_pe number : ', ens_handle%my_pe diff --git a/assimilation_code/modules/utilities/ensemble_manager_mod.rst b/assimilation_code/modules/utilities/ensemble_manager_mod.rst index f33d7b357c..1e31b109b8 100644 --- a/assimilation_code/modules/utilities/ensemble_manager_mod.rst +++ b/assimilation_code/modules/utilities/ensemble_manager_mod.rst @@ -140,12 +140,6 @@ Public interfaces \ compute_copy_mean \ compute_copy_mean_sd \ compute_copy_mean_var -\ prepare_to_write_to_vars -\ prepare_to_write_to_copies -\ prepare_to_read_from_vars -\ prepare_to_read_from_copies -\ prepare_to_update_vars -\ prepare_to_update_copies \ print_ens_handle \ map_pe_to_task \ map_task_to_pe @@ -174,7 +168,6 @@ A note about documentation style. Optional arguments are enclosed in brackets *[ ! Time is only related to var complete type(time_type), pointer :: time(:) integer :: distribution_type - integer :: valid ! 
copies modified last, vars modified last, both same integer :: id_num integer, allocatable :: task_to_pe_list(:) ! List of tasks integer, allocatable :: pe_to_task_list(:) ! List of tasks @@ -796,182 +789,6 @@ A note about documentation style. Optional arguments are enclosed in brackets *[ | -.. container:: routine - - *call prepare_to_update_vars(ens_handle)* - :: - - type(ensemble_type), intent(inout) :: ens_handle - -.. container:: indent1 - - Call this routine before directly accessing the ``ens_handle%vars`` array when the data is going to be updated, and - the incoming vars array should have the most current data representation. - - Internally the ensemble manager tracks which of the copies or vars arrays, or both, have the most recently updated - representation of the data. For example, before a transpose (``all_vars_to_all_copies()`` or - ``all_copies_to_all_vars()``) the code checks to be sure the source array has the most recently updated - representation before it does the operation. After a transpose both representations have the same update time and are - both valid. - - For efficiency reasons we allow the copies and vars arrays to be accessed directly from other code without going - through a routine in the ensemble manager. The "prepare" routines verify that the desired array has the most recently - updated representation of the data, and if needed marks which one has been updated so the internal consistency checks - have an accurate accounting of the representations. - - ============== ================================================ - ``ens_handle`` Handle for the ensemble being accessed directly. - ============== ================================================ - -| - -.. container:: routine - - *call prepare_to_update_copies(ens_handle)* - :: - - type(ensemble_type), intent(inout) :: ens_handle - -.. 
container:: indent1 - - Call this routine before directly accessing the ``ens_handle%copies`` array when the data is going to be updated, and - the incoming copies array should have the most current data representation. - - Internally the ensemble manager tracks which of the copies or vars arrays, or both, have the most recently updated - representation of the data. For example, before a transpose (``all_vars_to_all_copies()`` or - ``all_copies_to_all_vars()``) the code checks to be sure the source array has the most recently updated - representation before it does the operation. After a transpose both representations have the same update time and are - both valid. - - For efficiency reasons we allow the copies and vars arrays to be accessed directly from other code without going - through a routine in the ensemble manager. The "prepare" routines verify that the desired array has the most recently - updated representation of the data, and if needed marks which one has been updated so the internal consistency checks - have an accurate accounting of the representations. - - ============== ================================================ - ``ens_handle`` Handle for the ensemble being accessed directly. - ============== ================================================ - -| - -.. container:: routine - - *call prepare_to_read_from_vars(ens_handle)* - :: - - type(ensemble_type), intent(inout) :: ens_handle - -.. container:: indent1 - - Call this routine before directly accessing the ``ens_handle%vars`` array for reading only, when the incoming vars - array should have the most current data representation. - - Internally the ensemble manager tracks which of the copies or vars arrays, or both, have the most recently updated - representation of the data. For example, before a transpose (``all_vars_to_all_copies()`` or - ``all_copies_to_all_vars()``) the code checks to be sure the source array has the most recently updated - representation before it does the operation. 
After a transpose both representations have the same update time and are - both valid. - - For efficiency reasons we allow the copies and vars arrays to be accessed directly from other code without going - through a routine in the ensemble manager. The "prepare" routines verify that the desired array has the most recently - updated representation of the data, and if needed marks which one has been updated so the internal consistency checks - have an accurate accounting of the representations. - - ============== ================================================ - ``ens_handle`` Handle for the ensemble being accessed directly. - ============== ================================================ - -| - -.. container:: routine - - *call prepare_to_read_from_copies(ens_handle)* - :: - - type(ensemble_type), intent(inout) :: ens_handle - -.. container:: indent1 - - Call this routine before directly accessing the ``ens_handle%copies`` array for reading only, when the incoming - copies array should have the most current data representation. - - Internally the ensemble manager tracks which of the copies or vars arrays, or both, have the most recently updated - representation of the data. For example, before a transpose (``all_vars_to_all_copies()`` or - ``all_copies_to_all_vars()``) the code checks to be sure the source array has the most recently updated - representation before it does the operation. After a transpose both representations have the same update time and are - both valid. - - For efficiency reasons we allow the copies and vars arrays to be accessed directly from other code without going - through a routine in the ensemble manager. The "prepare" routines verify that the desired array has the most recently - updated representation of the data, and if needed marks which one has been updated so the internal consistency checks - have an accurate accounting of the representations. 
- - ============== ================================================ - ``ens_handle`` Handle for the ensemble being accessed directly. - ============== ================================================ - -| - -.. container:: routine - - *call prepare_to_write_to_vars(ens_handle)* - :: - - type(ensemble_type), intent(inout) :: ens_handle - -.. container:: indent1 - - Call this routine before directly accessing the ``ens_handle%vars`` array for writing. This routine differs from the - 'update' version in that it doesn't care what the original data state is. This routine might be used in the case - where an array is being filled for the first time and consistency with the data in the copies array is not an issue. - - Internally the ensemble manager tracks which of the copies or vars arrays, or both, have the most recently updated - representation of the data. For example, before a transpose (``all_vars_to_all_copies()`` or - ``all_copies_to_all_vars()``) the code checks to be sure the source array has the most recently updated - representation before it does the operation. After a transpose both representations have the same update time and are - both valid. - - For efficiency reasons we allow the copies and vars arrays to be accessed directly from other code without going - through a routine in the ensemble manager. The "prepare" routines verify that the desired array has the most recently - updated representation of the data, and if needed marks which one has been updated so the internal consistency checks - have an accurate accounting of the representations. - - ============== ================================================ - ``ens_handle`` Handle for the ensemble being accessed directly. - ============== ================================================ - -| - -.. container:: routine - - *call prepare_to_write_to_copies(ens_handle)* - :: - - type(ensemble_type), intent(inout) :: ens_handle - -.. 
container:: indent1 - - Call this routine before directly accessing the ``ens_handle%copies`` array for writing. This routine differs from - the 'update' version in that it doesn't care what the original data state is. This routine might be used in the case - where an array is being filled for the first time and consistency with the data in the vars array is not an issue. - - Internally the ensemble manager tracks which of the copies or vars arrays, or both, have the most recently updated - representation of the data. For example, before a transpose (``all_vars_to_all_copies()`` or - ``all_copies_to_all_vars()``) the code checks to be sure the source array has the most recently updated - representation before it does the operation. After a transpose both representations have the same update time and are - both valid. - - For efficiency reasons we allow the copies and vars arrays to be accessed directly from other code without going - through a routine in the ensemble manager. The "prepare" routines verify that the desired array has the most recently - updated representation of the data, and if needed marks which one has been updated so the internal consistency checks - have an accurate accounting of the representations. - - ============== ================================================ - ``ens_handle`` Handle for the ensemble being accessed directly. 
- ============== ================================================ - -| - Private interfaces ------------------ diff --git a/assimilation_code/programs/integrate_model/integrate_model.f90 b/assimilation_code/programs/integrate_model/integrate_model.f90 index 587dd4e1ee..fc385aa49c 100644 --- a/assimilation_code/programs/integrate_model/integrate_model.f90 +++ b/assimilation_code/programs/integrate_model/integrate_model.f90 @@ -20,8 +20,8 @@ program integrate_model use assim_model_mod, only : static_init_assim_model, get_model_size use obs_model_mod, only : advance_state -use ensemble_manager_mod, only : init_ensemble_manager, ensemble_type, & - prepare_to_write_to_vars +use ensemble_manager_mod, only : init_ensemble_manager, ensemble_type + use mpi_utilities_mod, only : initialize_mpi_utilities, finalize_mpi_utilities, & task_count, iam_task0 @@ -119,7 +119,6 @@ program integrate_model ! Initialize an ensemble manager type with a single copy call init_ensemble_manager(ens_handle, num_copies=1, num_vars=model_size, transpose_type_in = 2) -call prepare_to_write_to_vars(ens_handle) !------------------- Read restart from file ---------------------- diff --git a/assimilation_code/programs/perfect_model_obs/perfect_model_obs.f90 b/assimilation_code/programs/perfect_model_obs/perfect_model_obs.f90 index 6417785e2c..7013fe2774 100644 --- a/assimilation_code/programs/perfect_model_obs/perfect_model_obs.f90 +++ b/assimilation_code/programs/perfect_model_obs/perfect_model_obs.f90 @@ -36,9 +36,8 @@ program perfect_model_obs use random_seq_mod, only : random_seq_type, init_random_seq, random_gaussian use ensemble_manager_mod, only : init_ensemble_manager, & end_ensemble_manager, ensemble_type, & - get_my_num_copies, get_ensemble_time, prepare_to_write_to_vars, & - prepare_to_read_from_vars, allocate_vars, & - all_vars_to_all_copies, & + get_my_num_copies, get_ensemble_time, & + allocate_vars, all_vars_to_all_copies, & all_copies_to_all_vars use filter_mod, only : 
filter_set_initial_time, filter_sync_keys_time @@ -465,8 +464,6 @@ subroutine perfect_main() call trace_message('After setup for next group of observations') - call prepare_to_read_from_vars(ens_handle) - ! Output the true state to the netcdf file if((output_interval > 0) .and. & (time_step_number / output_interval * output_interval == time_step_number)) then diff --git a/conf.py b/conf.py index 0dd2606f18..4ecb5b9aaf 100644 --- a/conf.py +++ b/conf.py @@ -21,7 +21,7 @@ author = 'Data Assimilation Research Section' # The full version, including alpha/beta/rc tags -release = '11.3.0' +release = '11.3.1' root_doc = 'index' # -- General configuration --------------------------------------------------- diff --git a/models/wrf/shell_scripts/assim_advance.csh b/models/wrf/shell_scripts/assim_advance.csh index 1db7d64d26..b9fa8e5e44 100755 --- a/models/wrf/shell_scripts/assim_advance.csh +++ b/models/wrf/shell_scripts/assim_advance.csh @@ -50,7 +50,7 @@ if ( -e $RUN_DIR/advance_temp${emember}/wrf.info ) then endif touch wrf.info -if ( $SUPER_PLATFORM == 'yellowstone' ) then +if ( $SUPER_PLATFORM == 'LSF queuing system' ) then cat >! $RUN_DIR/advance_temp${emember}/wrf.info << EOF ${gdatef[2]} ${gdatef[1]} @@ -60,7 +60,7 @@ $yyyy $mm $dd $hh $nn $ss mpirun.lsf ./wrf.exe EOF -else if ( $SUPER_PLATFORM == 'cheyenne' ) then +else if ( $SUPER_PLATFORM == 'derecho' ) then # module load openmpi cat >! 
$RUN_DIR/advance_temp${emember}/wrf.info << EOF @@ -68,7 +68,7 @@ ${gdatef[2]} ${gdatef[1]} ${gdate[2]} ${gdate[1]} $yyyy $mm $dd $hh $nn $ss $domains - mpiexec_mpt dplace -s 1 ./wrf.exe + mpiexec -n 128 -ppn 128 ./wrf.exe EOF endif diff --git a/models/wrf/shell_scripts/assimilate.csh b/models/wrf/shell_scripts/assimilate.csh index 871f0f311d..00cce0d190 100755 --- a/models/wrf/shell_scripts/assimilate.csh +++ b/models/wrf/shell_scripts/assimilate.csh @@ -23,20 +23,19 @@ if ( -e ${RUN_DIR}/obs_seq.final ) ${REMOVE} ${RUN_DIR}/obs_seq.final if ( -e ${RUN_DIR}/filter_done ) ${REMOVE} ${RUN_DIR}/filter_done # run data assimilation system -if ( $SUPER_PLATFORM == 'yellowstone' ) then +if ( $SUPER_PLATFORM == 'LSF queuing system' ) then setenv TARGET_CPU_LIST -1 setenv FORT_BUFFERED true mpirun.lsf ./filter || exit 1 -else if ( $SUPER_PLATFORM == 'cheyenne' ) then +else if ( $SUPER_PLATFORM == 'derecho' ) then -# TJH MPI_SHEPHERD TRUE may be a very bad thing setenv MPI_SHEPHERD FALSE setenv TMPDIR /dev/shm limit stacksize unlimited - mpiexec_mpt dplace -s 1 ./filter || exit 1 + mpiexec -n 256 -ppn 128 ./filter || exit 1 endif diff --git a/models/wrf/shell_scripts/driver.csh b/models/wrf/shell_scripts/driver.csh index 3c609bbab1..289a79e6e8 100755 --- a/models/wrf/shell_scripts/driver.csh +++ b/models/wrf/shell_scripts/driver.csh @@ -78,13 +78,13 @@ while ( 1 == 1 ) # # NOTE that multiple domains might be present, but only looking for domain 1 - if ( $SUPER_PLATFORM == 'yellowstone' ) then + if ( $SUPER_PLATFORM == 'LSF queuing system' ) then set ic_queue = caldera set logfile = "${RUN_DIR}/ic_gen.log" set sub_command = "bsub -q ${ic_queue} -W 00:05 -o ${logfile} -n 1 -P ${COMPUTER_CHARGE_ACCOUNT}" - else if ( $SUPER_PLATFORM == 'cheyenne' ) then - set ic_queue = "economy" - set sub_command = "qsub -l select=1:ncpus=2:mpiprocs=36:mem=5GB -l walltime=00:03:00 -q ${ic_queue} -A ${COMPUTER_CHARGE_ACCOUNT} -j oe -k eod -N icgen " + else if ( $SUPER_PLATFORM == 'derecho' ) 
then + set ic_queue = "main" + set sub_command = "qsub -l select=1:ncpus=128:mpiprocs=128:mem=5GB -l walltime=00:03:00 -q ${ic_queue} -A ${COMPUTER_CHARGE_ACCOUNT} -j oe -k eod -N icgen " endif echo "this platform is $SUPER_PLATFORM and the job submission command is $sub_command" @@ -120,7 +120,7 @@ while ( 1 == 1 ) set n = 1 while ( $n <= $NUM_ENS ) - if ( $SUPER_PLATFORM == 'cheyenne' ) then # can't pass along arguments in the same way + if ( $SUPER_PLATFORM == 'derecho' ) then # can't pass along arguments in the same way $sub_command -v mem_num=${n},date=${datep},domain=${domains},paramf=${paramfile} ${SHELL_SCRIPTS_DIR}/prep_ic.csh else $sub_command " ${SHELL_SCRIPTS_DIR}/prep_ic.csh ${n} ${datep} ${dn} ${paramfile} " @@ -147,9 +147,8 @@ while ( 1 == 1 ) @ loop++ if ( $loop > 60 ) then # wait 5 minutes for the ic file to be ready, else run manually echo "gave up on ic member $n - redo" - # TJH this is not the command for cheyenne, why not $sub_command from above ${SHELL_SCRIPTS_DIR}/prep_ic.csh ${n} ${datep} ${dn} ${paramfile} - # TJH the job queued above is still queued and should be killed ... + # If manual execution of script, shouldn't queued job be killed? endif endif end @@ -210,7 +209,7 @@ while ( 1 == 1 ) # run filter to generate the analysis ${REMOVE} script.sed - if ( $SUPER_PLATFORM == 'yellowstone' ) then + if ( $SUPER_PLATFORM == 'LSF queuing system' ) then # This is a most unusual application of 'sed' to insert the batch submission # directives into a file. The last backslash '\' before the quote is essential. @@ -241,7 +240,7 @@ while ( 1 == 1 ) endif set this_filter_runtime = $FILTER_TIME - else if ( $SUPER_PLATFORM == 'cheyenne' ) then + else if ( $SUPER_PLATFORM == 'derecho' ) then echo "2i\" >! 
script.sed echo "#=================================================================\" >> script.sed @@ -250,6 +249,7 @@ while ( 1 == 1 ) echo "#PBS -A ${COMPUTER_CHARGE_ACCOUNT}\" >> script.sed echo "#PBS -l walltime=${FILTER_TIME}\" >> script.sed echo "#PBS -q ${FILTER_QUEUE}\" >> script.sed + echo "#PBS -l job_priority=${FILTER_PRIORITY}\" >> script.sed echo "#PBS -m ae\" >> script.sed echo "#PBS -M ${EMAIL}\" >> script.sed echo "#PBS -k eod\" >> script.sed @@ -382,7 +382,7 @@ while ( 1 == 1 ) set n = 1 while ( $n <= $NUM_ENS ) - if ( $SUPER_PLATFORM == 'yellowstone' ) then + if ( $SUPER_PLATFORM == 'LSF queuing system' ) then echo "2i\" >! script.sed echo "#==================================================================\" >> script.sed @@ -407,7 +407,7 @@ while ( 1 == 1 ) bsub < assim_advance_mem${n}.csh endif - else if ( $SUPER_PLATFORM == 'cheyenne' ) then + else if ( $SUPER_PLATFORM == 'derecho' ) then echo "2i\" >! script.sed echo "#=================================================================\" >> script.sed @@ -416,6 +416,7 @@ while ( 1 == 1 ) echo "#PBS -A ${COMPUTER_CHARGE_ACCOUNT}\" >> script.sed echo "#PBS -l walltime=${ADVANCE_TIME}\" >> script.sed echo "#PBS -q ${ADVANCE_QUEUE}\" >> script.sed + echo "#PBS -l job_priority=${ADVANCE_PRIORITY}\" >> script.sed echo "#PBS -m a\" >> script.sed echo "#PBS -M ${EMAIL}\" >> script.sed echo "#PBS -k eod\" >> script.sed @@ -456,7 +457,7 @@ while ( 1 == 1 ) # Wait for the script to start while ( ! -e ${RUN_DIR}/start_member_${n} ) - if ( $SUPER_PLATFORM == 'yellowstone' ) then + if ( $SUPER_PLATFORM == 'LSF queuing system' ) then if ( `bjobs -w | grep assim_advance_${n} | wc -l` == 0 ) then @@ -470,7 +471,7 @@ while ( 1 == 1 ) endif - else if ( $SUPER_PLATFORM == 'cheyenne' ) then + else if ( $SUPER_PLATFORM == 'derecho' ) then if ( `qstat -wa | grep assim_advance_${n} | wc -l` == 0 ) then @@ -502,7 +503,7 @@ while ( 1 == 1 ) # Obviously, the job crashed. 
Resubmit to queue ${REMOVE} start_member_${n} echo "didn't find the member done file" - if ( $SUPER_PLATFORM == 'yellowstone' ) then + if ( $SUPER_PLATFORM == 'LSF queuing system' ) then if ( $?reservation ) then echo "MEMBER ${n} USING RESERVATION," `/contrib/lsf/get_my_rsvid` @@ -511,7 +512,7 @@ while ( 1 == 1 ) bsub < assim_advance_mem${n}.csh endif - else if ( $SUPER_PLATFORM == 'cheyenne' ) then + else if ( $SUPER_PLATFORM == 'derecho' ) then qsub assim_advance_mem${n}.csh diff --git a/models/wrf/shell_scripts/first_advance.csh b/models/wrf/shell_scripts/first_advance.csh index 28e7fe2f3a..32ec373706 100755 --- a/models/wrf/shell_scripts/first_advance.csh +++ b/models/wrf/shell_scripts/first_advance.csh @@ -38,7 +38,7 @@ endif touch wrf.info -if ( $SUPER_PLATFORM == 'yellowstone' ) then +if ( $SUPER_PLATFORM == 'LSF queuing system' ) then cat >! $RUN_DIR/advance_temp${emember}/wrf.info << EOF ${gdatef[2]} ${gdatef[1]} @@ -48,11 +48,8 @@ if ( $SUPER_PLATFORM == 'yellowstone' ) then mpirun.lsf ./wrf.exe EOF -else if ( $SUPER_PLATFORM == 'cheyenne' ) then +else if ( $SUPER_PLATFORM == 'derecho' ) then - # TJH MPI_IB_CONGESTED, MPI_LAUNCH_TIMEOUT used after cheyenne O/S change in July 2019 - # TJH setenv MPI_IB_CONGESTED 1 - # TJH setenv MPI_LAUNCH_TIMEOUT 40 setenv MPI_SHEPHERD false cat >! 
$RUN_DIR/advance_temp${emember}/wrf.info << EOF @@ -60,7 +57,7 @@ else if ( $SUPER_PLATFORM == 'cheyenne' ) then ${gdate[2]} ${gdate[1]} $yyyy $mm $dd $hh $nn $ss $domains - mpiexec_mpt dplace -s 1 ./wrf.exe + mpiexec -n 128 -ppn 128 ./wrf.exe EOF endif diff --git a/models/wrf/shell_scripts/gen_retro_icbc.csh b/models/wrf/shell_scripts/gen_retro_icbc.csh index 4b2bb6bf49..14b4fb3b69 100755 --- a/models/wrf/shell_scripts/gen_retro_icbc.csh +++ b/models/wrf/shell_scripts/gen_retro_icbc.csh @@ -42,7 +42,7 @@ echo "gen_retro_icbc.csh is running in `pwd`" set datea = 2017042700 set datefnl = 2017042712 # set this appropriately #%%%# -set paramfile = /glade2/scratch2/USERNAME/WORK_DIR/scripts/param.csh # set this appropriately #%%%# +set paramfile = /glade/derecho/scratch/USERNAME/WORK_DIR/scripts/param.csh # set this appropriately #%%%# source $paramfile @@ -171,22 +171,23 @@ EOF #if ( -e rsl.out.0000 ) cat rsl.out.0000 >> out.real.exe rm script.sed real_done rsl.* - echo "2i\" >! script.sed - echo "#======================================\" >> script.sed - echo "#PBS -N run_real\" >> script.sed - echo "#PBS -A ${COMPUTER_CHARGE_ACCOUNT}\" >> script.sed - echo "#PBS -l walltime=00:05:00\" >> script.sed - echo "#PBS -q ${ADVANCE_QUEUE}\" >> script.sed - echo "#PBS -o run_real.out\" >> script.sed - echo "#PBS -j oe\" >> script.sed - echo "#PBS -k eod\" >> script.sed - echo "#PBS -l select=3:ncpus=36:mpiprocs=36\" >> script.sed - echo "#PBS -V\" >> script.sed - echo "#======================================\" >> script.sed - echo "\" >> script.sed - echo "" >> script.sed - echo 's%${1}%'"${paramfile}%g" >> script.sed - sed -f script.sed ${SHELL_SCRIPTS_DIR}/real.csh >! real.csh + echo "2i\" >! 
script.sed + echo "#======================================\" >> script.sed + echo "#PBS -N run_real\" >> script.sed + echo "#PBS -A ${COMPUTER_CHARGE_ACCOUNT}\" >> script.sed + echo "#PBS -l walltime=00:05:00\" >> script.sed + echo "#PBS -q ${ADVANCE_QUEUE}\" >> script.sed + echo "#PBS -l job_priority=${ADVANCE_PRIORITY}\" >> script.sed + echo "#PBS -o run_real.out\" >> script.sed + echo "#PBS -j oe\" >> script.sed + echo "#PBS -k eod\" >> script.sed + echo "#PBS -l select=1:ncpus=128:mpiprocs=128\" >> script.sed + echo "#PBS -V\" >> script.sed + echo "#======================================\" >> script.sed + echo "\" >> script.sed + echo "" >> script.sed + echo 's%${1}%'"${paramfile}%g" >> script.sed + sed -f script.sed ${SHELL_SCRIPTS_DIR}/real.csh >! real.csh qsub real.csh diff --git a/models/wrf/shell_scripts/init_ensemble_var.csh b/models/wrf/shell_scripts/init_ensemble_var.csh index 3b1196e00a..a6af8845a4 100755 --- a/models/wrf/shell_scripts/init_ensemble_var.csh +++ b/models/wrf/shell_scripts/init_ensemble_var.csh @@ -83,6 +83,7 @@ EOF #PBS -A ${COMPUTER_CHARGE_ACCOUNT} #PBS -l walltime=${ADVANCE_TIME} #PBS -q ${ADVANCE_QUEUE} +#PBS -l job_priority=${ADVANCE_PRIORITY} #PBS -m ae #PBS -M ${EMAIL} #PBS -k eod diff --git a/models/wrf/shell_scripts/param.csh b/models/wrf/shell_scripts/param.csh index e41ccdb1d8..a9c13f66e8 100755 --- a/models/wrf/shell_scripts/param.csh +++ b/models/wrf/shell_scripts/param.csh @@ -9,8 +9,7 @@ # ASSIM_INT_MINUTES support needs to be added to param.csh, # it is referenced in assim_advance.csh but not declared in param.csh -# Set up environment. Current settings are for NCAR's Cheyenne -module load mpt # set this appropriately #%%%# +# Set up environment. 
Current settings are for NCAR's Derecho module load nco # set this appropriately #%%%# module load ncl/6.6.2 # set this appropriately #%%%# @@ -25,7 +24,7 @@ set NUM_DOMAINS = 1 # Directories where things are run # IMPORTANT : Scripts provided rely on this directory structure and names relative to BASE_DIR. # Do not change, otherwise tutorial will fail. -set BASE_DIR = /glade2/scratch2/USER/WORK_DIR # set this appropriately #%%%# +set BASE_DIR = /glade/derecho/scratch/USER/WORK_DIR # set this appropriately #%%%# set RUN_DIR = ${BASE_DIR}/rundir set TEMPLATE_DIR = ${BASE_DIR}/template set OBSPROC_DIR = ${BASE_DIR}/obsproc @@ -37,15 +36,15 @@ set PERTS_DIR = ${BASE_DIR}/perts # Directories that can be used by many things set SHELL_SCRIPTS_DIR = ${BASE_DIR}/scripts -set DART_DIR = /glade/p/work/USER/DART_manhattan # set this appropriately #%%%# -set WRF_DM_SRC_DIR = /glade/p/work/USER/WRFV3_dmpar # set this appropriately #%%%# -set WPS_SRC_DIR = /glade/p/work/USER/WPS # set this appropriately #%%%# -set VAR_SRC_DIR = /glade/p/work/USER/WRFDA # set this appropriately #%%%# +set DART_DIR = /glade/work/USER/DART # set this appropriately #%%%# +set WRF_DM_SRC_DIR = /glade/work/USER/WRFV3 # set this appropriately #%%%# +set WPS_SRC_DIR = /glade/work/USER/WPS # set this appropriately #%%%# +set VAR_SRC_DIR = /glade/work/USER/WRFDA # set this appropriately #%%%# # for generating wrf template files -set GEO_FILES_DIR = /glade/p/work/USER/WPS # set this appropriately #%%%# -set GRIB_DATA_DIR = /glade/p/work/USER/WPS/GRIB # set this appropriately #%%%# -set GRIB_SRC = 'GFS' # set this appropriately #%%%# +set GEO_FILES_DIR = /glade/u/home/wrfhelp/WPS_GEOG # set this appropriately #%%%# +set GRIB_DATA_DIR = ${ICBC_DIR}/grib_data # set this appropriately #%%%# +set GRIB_SRC = 'GFS' # set this appropriately #%%%# # list of variables for extraction and cycling set extract_vars_a = ( U V PH T MU QVAPOR QCLOUD QRAIN QICE QSNOW QGRAUP QNICE QNRAIN \ @@ -61,31 +60,29 @@ set 
increment_vars_a = ( U V PH T MU QVAPOR QCLOUD QRAIN QICE QSNOW QGRAUP QNICE set OBS_VERIF_DAYS = 7 # Generic queuing system parameters -set SUPER_PLATFORM = cheyenne - -# TJH consistent way of checking the SUPER_PLATFORM and injecting that -# header information into the scripts ... rather than have scripts -# that have redundant blocks in them ... -# +set SUPER_PLATFORM = derecho set COMPUTER_CHARGE_ACCOUNT = YOUR_ACCT # set this appropriately #%%%# -set EMAIL = YOUR_EMAIL@SOMEPLACE.COM # set this appropriately #%%%# +set EMAIL = YOUR_EMAIL # set this appropriately #%%%# -if ( $SUPER_PLATFORM == 'cheyenne') then - # cheyenne values (uses 'PBS' queueing system) - # set this appropriately #%%%# ... ALL OF THESE if using PBS - set FILTER_QUEUE = regular +if ( $SUPER_PLATFORM == 'derecho') then + # Derecho values (uses 'PBS' queueing system) + # Set these appropriately for your PBS system #%%%# + set FILTER_QUEUE = main + set FILTER_PRIORITY = premium set FILTER_TIME = 0:35:00 - set FILTER_NODES = 10 - set FILTER_PROCS = 36 - set FILTER_MPI = 36 - set ADVANCE_QUEUE = regular - set ADVANCE_TIME = 0:20:00 - set ADVANCE_NODES = 3 - set ADVANCE_PROCS = 36 - set ADVANCE_MPI = 36 + set FILTER_NODES = 2 + set FILTER_PROCS = 128 + set FILTER_MPI = 128 + + set ADVANCE_QUEUE = main + set ADVANCE_PRIORITY = premium + set ADVANCE_TIME = 0:20:00 + set ADVANCE_NODES = 1 + set ADVANCE_PROCS = 128 + set ADVANCE_MPI = 128 else - # yellowstone (uses 'LSF' queueing system) - # set this appropriately #%%%# ... ALL OF THESE if using LSF + # 'LSF' queueing system example + # Set these appropriately for your LSF or Slurm system #%%%# set FILTER_QUEUE = regular set FILTER_TIME = 0:25 set FILTER_CORES = 512 @@ -97,8 +94,6 @@ else endif # System specific commands -# TJH ... The LINK command probably should not have the force option. -# TJH ... and if the LINK fails, should it die right there? 
setenv REMOVE 'rm -rf' setenv COPY 'cp -pfr' setenv MOVE 'mv -f' diff --git a/models/wrf/shell_scripts/real.csh b/models/wrf/shell_scripts/real.csh index 0185106bc9..d1459b6168 100755 --- a/models/wrf/shell_scripts/real.csh +++ b/models/wrf/shell_scripts/real.csh @@ -5,7 +5,7 @@ source $paramfile cd ${ICBC_DIR} - mpiexec_mpt dplace -s 1 ${RUN_DIR}/WRF_RUN/real.exe + mpiexec -n 128 -ppn 128 ${RUN_DIR}/WRF_RUN/real.exe #if ( `grep "Successful completion of program real.exe" ./rsl.out.0000 | wc -l ` == 1 ) touch ${ICBC_DIR}/real_done diff --git a/models/wrf/tutorial/README.rst b/models/wrf/tutorial/README.rst index 1acd4919a4..c64b43a874 100644 --- a/models/wrf/tutorial/README.rst +++ b/models/wrf/tutorial/README.rst @@ -8,49 +8,32 @@ Introduction This document will describe how to get started with your own Weather Research and Forecasting (WRF) data assimilation experiments using DART -and only covers only the WRF-specific aspects of integrating with DART. -It is not wise to try to run WRF/DART if you have no experience with WRF -and/or no experience with DART. +and only covers the WRF-specific aspects of coupling with DART. +It is not wise to try to run WRF-DART if you have no experience with +either WRF or DART. -This tutorial was assembled to be compatible with ~WRF V3.9.1 and the -DART Manhattan release. Other releases of WRF may or may not be +This tutorial was designed to be compatible with WRF V3.9.1 and was +tested with DART V11.0.2. Other releases of WRF may or may not be backwards or forwards compatible with this tutorial. +Prior to running this tutorial, we urge the users to familiarize themselves with the +`WRF system `__ +(WRF_ARW, WPS and WRFDA), and to read through the `WRFv3.9 User's Guide +`__ +and the `WRF model tutorials `__ -You must already be comfortable running the -`WRF `__ -system (WPS, real_em build of WRF). If not, work through the `WRF model -tutorial `__ -first before trying to link WRF and DART together. 
Check the WRF user -guide or the -`WRFHELP `__ -forum for WRF-specific assistance. +The DART team is not responsible for and does not maintain the WRF code. For WRF related issues check out the +`WRF User Forum `__ +or the `WRF github page. `__ If you are new to DART, we recommend that you become familiar with DART by working through the :doc:`../../../theory/readme` and then understanding the :ref:`DART getting started ` documentation. -before attempting the WRF/DART tutorial as you will find many helpful -resources for learning the base DART configuration. - -*We do not claim that this is a “turnkey” or “black box” system.* Be -mentally prepared to invest a reasonable amount of time on the learning -curve. There are many outstanding research issues which have no easy -answers. This is not a one week/grad student/naive user system. Even -after you get the code up and running, you have to be able to interpret -the results, which requires developing specific skills. There are a lot -of ways to alter how the system works – localization, inflation, which -variables and observations are assimilated, the assimilation window -time, the model resolution, etc, etc. This is both good and bad - you -have many ways of improving your results, but you have to take care on -how you leave all the settings of these inputs. Getting a set of scripts -that runs doesn’t mean the system is running well, or producing useful -results. So - if you’re still reading: Let the adventure begin! - -This tutorial introduces a “canned” WRF/DART experiment involving an +This tutorial is **not** a toy simulation, but represents a realistic WRF-DART +assimilation for the continental United States. It uses a WRF ensemble of 50 members that will be initialized from GFS initial -conditions at 2017/04/27 00:00 UTC using a domain of the continental -United States. The data included in the tutorial lasts until 2017/04/30 -18:00 UTC. 
During this period, there was a strong rain and wind event +conditions at 2017/04/27 00:00 UTC. The data included in the tutorial lasts +until 2017/04/30 18:00 UTC. During this period, there was a strong rain and wind event that affected a large portion of the United States, causing record rains, localized flooding, and numerous tornadoes. For more information on the physical account of this case, see @@ -63,46 +46,67 @@ observations will then be performed at 06:00 UTC, at which time analysis files will be generated to begin a new ensemble forecast. The WRF model will be advanced for 6 hours and a final assimilation cycle will be performed at 12:00 UTC. This process could then continue in order to -investigate the strong rain and wind event. For what it's worth, on -NSF NCAR's *Cheyenne* under the default test configuration for this case, it -can take an hour to complete a forecast/assimilation cycle. Since the -tutorial runs for two cycles, it can take twice as long. - -The goals of this tutorial are to demonstrate how WRF/DART works. After -running this tutorial, you will be able to understand the major steps -involved in setting up your own data assimilation (DA) experiments. -However, you will need to do additional work before you can expect to -have a fully functional WRF/DART system, as some of the steps involved +investigate the strong rain and wind event. On NSF NCAR's *Derecho*, +the tutorial requires at least 30 minutes of run time, and can take +much longer (1-2 hours) depending upon the PBS queue wait time. + +The goal of this tutorial is to demonstrate how WRF-DART works, and to provide an +understanding of the major steps within a data assimilation (DA) experiment. +However, you will need to do additional work before you can apply +WRF-DART to your own research application, as some of the steps involved in this tutorial (in particular, the perturbation bank and the observation sequence files) are provided for you in order to simplify -the process. 
Furthermore, if you are not running on the NSF NCAR -Cheyenne supercomputing system, you will likely need to customize the -assimilation scripts to match the details of your particular system. +the process. We provide a diagnostic section at the end of the tutorial to +assess the skill/success of the assimilation. Be aware, an assimilation is +not successful just because it runs to completion. A successful assimilation +generally uses the vast majority of the observations provided and minimizes +the bias and RMSE between the posterior model state and the observations. +Finally, if you are not running on the NSF NCAR Derecho (PBS) supercomputing system, you will +need to customize the assimilation scripts (located in /DART/models/wrf/shell_scripts/) to match the details of your particular system. +Specifically, you will need to edit the DART csh scripting to match your system settings +whether that be, for example, a PBS, SLURM or LSF HPC system. Although the DART team can +offer advice on how to customize the scripting to accommodate your HPC system, your +HPC system administrator is likely the best resource to resolve these issues. -.. important :: - We have provided instructions for the NSF NCAR supercomputer - Cheyenne, so you may need to tailor these instructions to your system if - you are not using Cheyenne. These system-specific setup steps may take a - good deal of effort, especially if you are unfamiliar with details such - as MPI, NetCDF, etc. Furthermore, even after you get the code up and - running, you will need to properly interpret your results. +.. Important :: + + The tutorial scripting and instructions are based on the NSF NCAR supercomputer + Derecho, so you will need to edit the scripts and interpret the instructions for + other HPC systems. The scripting uses examples of a PBS queuing system (e.g. Derecho) + and LSF queuing system (e.g. decommissioned Yellowstone). You can use these as a + template for your own system. 
Step 1: Setup ------------- There are several dependencies for the executables and scripting -components. On Cheyennne, users have reported success building WRF, WPS, -WRFDA, and DART with the default module environment including Intel -compilers, MPT, and netCDF4. In addition, you'll need to load the +There are several required dependencies for the executables and WRF-DART scripting +components. On NSF NCAR's Derecho, users have reported success building WRF, WPS, +WRFDA, and DART using gfortran with the following module environment. Note: not all +modules listed below are a requirement to compile and run the tutorial. + + :: + + Currently Loaded Modules: + 1) ncarenv/23.09 (S) 3) udunits/2.2.28 5) ncarcompilers/1.0.0 7) cray-mpich/8.1.27 9) netcdf-mpi/4.9.2 + 2) gcc/12.2.0 4) ncview/2.1.9 6) craype/2.7.23 8) hdf5-mpi/1.12.2 10) hdf/4.2.15 +In addition, you'll need to load the `nco `__ and `ncl `__ modules to run the set of scripts -that accompany the tutorial. +that accompany the tutorial. For Derecho the nco and ncl +packages can be automatically loaded using the following commands: -There are multiple phases for the setup: building the DART executables, -getting the initial WRF boundary conditions etc., building (or using + :: + + module load nco + module load ncl/6.6.2 + +These commands are provided by default with the param.csh script. More details +are provided below. There are multiple phases for the setup: building the DART executables, +downloading the initial WRF boundary conditions, building (or using existing) WRF executables, and configuring and staging the scripting needed to perform an experiment. @@ -138,6 +142,13 @@ might need for an experiment with that model. not, you will need to do so now. See :ref:`Getting Started ` for more detail, if necessary. +.. 
Important :: + + If using gfortran to compile DART on Derecho, a successful configuration + of the ``mkmf.template`` includes using the ``mkmf.template.gfortran`` script + and customizing the compiler flags as follows: + FFLAGS = -O2 -ffree-line-length-none -fallow-argument-mismatch -fallow-invalid-boz $(INCS) + 2. [OPTIONAL] Modify the DART code to use 32bit reals. Most WRF/DART users run both the WRF model and the DART assimilation code using 32bit reals. This is not the default for the DART code. Make this @@ -176,7 +187,7 @@ might need for an experiment with that model. cd $DART_DIR/models/wrf cp tutorial/template/input.nml.template work/input.nml -4. Build the WRF/DART executables: +4. Build the WRF-DART executables: :: @@ -204,7 +215,8 @@ Preparing the experiment directory. Approximately 100Gb of space is needed to run the tutorial. Create a "work" directory someplace with a lot of free space. The rest of the instructions assume you have an environment variable called *BASE_DIR* -that points to this directory. +that points to this directory. On Derecho it is convenient to use your +scratch directory for this purpose. ===== ==================================================== shell command @@ -245,40 +257,50 @@ bash ``export BASE_DIR=`` mkdir $BASE_DIR/scripts cp -R $DART_DIR/models/wrf/shell_scripts/* $BASE_DIR/scripts -Build or locate WRF executables. -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The -`WRFDA `__ -package is needed to generate a set of perturbed initial ensemble member +Build or locate the WRF, WPS and WRFDA executables +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Instructions for downloading the WRF package is located +`here. `__ +The WRF package consists of 3 parts: the WRF atmospheric model WRF(ARW), the +WRF Preprocessing System (WPS) and WRF Data Assimilation System (WRFDA). 
+ +Importantly, DART is used to perform the ensemble DA for this tutorial, however, +the WRFDA package is required to generate a set of perturbed initial ensemble member files and also to generate perturbed boundary condition files. Since the tutorial provides a perturbation bank for a specific case, it is not required to actually *run da_wrfvar.exe* but it needs to be in the ``WRF_RUN`` directory for the tutorial. -Build (or locate an appropriate build of) WRF, WPS and WRFDA. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - WRF and WRFDA should be built with the "dmpar" option, while WPS can be -built "serial"ly. See the WRF/WRFDA documentation for more information +built "serial"ly. See the WRF documentation for more information about building these packages. -.. note:: +.. Warning:: For consistency and to avoid errors, you should build WRF, WPS, WRFDA, and DART with the same compiler you use for NetCDF. Likewise MPI should use the same compiler. You will need the location of the WRF and WRFDA builds to customize the - *params.csh* script in the next step. + *params.csh* script in the next step. If using gfortran to compile WRF on Derecho + we recommend using option 34 (gnu dmpar) to configure WRF, option 1 (gnu serial) to + configure WPS, and option 34 (gnu dmpar) to configure WRFDA. You will need the location + of the WRF, WPS, and WRFDA builds to customize the *params.csh* script in the next step. + + Using the gfortran compiler on Derecho required custom flag settings to successfully + compile the WRF, WPS and WRFDA executables. For more information please see + NCAR/DART `github issue 627. `__ + Configure ``$BASE_DIR/scripts/param.csh`` with proper paths, info, etc. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This is a script that sets variables which will be read by other -WRF/DART scripts. 
There are some specific parameters for either the +Derecho supercomputing system using the `PBS `__ queueing system or the (decommissioned) Yellowstone system which used the *LSF* queueing -system. If you are not using Cheyenne, you may still want to use this +system. If you are not using Derecho, you may still want to use this script to set your queueing-system specific parameters. .. important:: @@ -291,8 +313,6 @@ script to set your queueing-system specific parameters. +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------+ | Script variable | Description | +=========================+=====================================================================================================================================================+ - | module load mpt | The Environment Modules MPI compiler to use (here the HPE MPI) compiler). Note that on Cheyenne the default compiler is Intel. | - +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------+ | module load nco | The nco package. | +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------+ | module load ncl/6.6.2 | The ncl package. | @@ -307,7 +327,7 @@ script to set your queueing-system specific parameters. +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------+ | VAR_SRC_DIR | The directory of the WRFDA installation. 
| +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------+ - | GEO_FILES_DIR | The root directory of the WPS_GEOG files. NOTE: on Cheyenne these are available in the /glade/u/home/wrfhelp/WPS_GEOG directory | + | GEO_FILES_DIR | The root directory of the WPS_GEOG files. NOTE: on Derecho these are available in the /glade/u/home/wrfhelp/WPS_GEOG directory | +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------+ | GRIB_DATA_DIR | The root directory of the GRIB data input into ungrib.exe. For this tutorial the grib files are included, so use ${ICBC_DIR}/grib_data | +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -315,7 +335,7 @@ script to set your queueing-system specific parameters. +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------+ | COMPUTER_CHARGE_ACCOUNT | The project account for supercomputing charges. See your supercomputing project administrator for more information. | +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------+ - | EMAIL | The e-mail address used by the queueing system to send job summary information. | + | EMAIL | The e-mail address used by the queueing system to send job summary information. This is optional. 
| +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -427,7 +447,7 @@ find the following scripts: +-----------------------+-------------------------------------------------------------------------------------------+ | new_advance_model.csh | advances the WRF model after running DART in a cycling context. | +-----------------------+-------------------------------------------------------------------------------------------+ -| param.csh | Contains most of the key settings to run the WRF/DART system. | +| param.csh | Contains most of the key settings to run the WRF-DART system. | +-----------------------+-------------------------------------------------------------------------------------------+ | prep_ic.csh | Prepares the initial conditions for a single ensemble member. | +-----------------------+-------------------------------------------------------------------------------------------+ @@ -570,14 +590,22 @@ you when each ensemble member has finished. Step 3: Prepare observations [OPTIONAL] --------------------------------------- -For the tutorial exercise, observation sequence files are provided to -enable you to quickly get started running a test WRF/DART system. If you -want to run with the example observations, you can skip to Step -4. - -However, observation processing is critical to the success of running -DART and was covered in :ref:`Getting Started `. In -brief, to add your own observations to WRF/DART you will need to +.. Warning:: + + The observation sequence files to run this tutorial are already provided + for you. If you want to run with the provided tutorial observations, you + can skip to Step 4 right now. If you are interested in using custom + observations for a WRF experiment other than the tutorial you should read on. 
+ The remaining instructions provided below in Step 3 are meant as a guideline + to converting raw PREPBUFR data files into the required ``obs_seq`` format + required by DART. Be aware that there is ongoing discussion of the proper + archived data set (RDA ds090.0 or ds337.0) that should be used to obtain + the PREPBUFR data. See the discussion in `bug report #634 <https://github.com/NCAR/DART/issues/634>`__. + If you have questions please contact the DART team. + +Observation processing is critical to the success of running +DART and is covered in :ref:`Getting Started `. In +brief, to add your own observations to WRF-DART you will need to understand the relationship between observation definitions and observation sequences, observation types and observation quantities, and understand how observation converters extract observations from their @@ -589,26 +617,22 @@ contain a wide array of observation types from many platforms within a single file. If you wanted to generate your own observation sequence files from -PREPBUFR for an experiment with WRF/DART, you should follow the guidance +PREPBUFR for an experiment with WRF-DART, you should follow the guidance on the `prepbufr <../../../observations/obs_converters/NCEP/prep_bufr/prep_bufr.html>`__ page to build the bufr conversion programs, get observation files for the dates you plan to build an analysis for, and run the codes to generate an observation sequence file. -For completeness, we list here how you could generate these observation -sequence files yourself. - -.. important:: - - the following steps are **not - necessary** for the tutorial as the processed PREPBUFR observation - sequence files have already been provided for you. However, these steps - are provided in order to help users get started with these observations - quickly for their own experiments. +The steps listed below to generate these observation +sequence files are meant as a guideline for NSF NCAR Research Data +Archive data file ds090.0.
**Be aware not all required software has been +migrated to Derecho to perform this conversion.** +See `bug report #634 <https://github.com/NCAR/DART/issues/634>`__ +for more updated information. -To (again, *optionally*) reproduce the observation sequence files in the -*output* directories, you would do the following: +To reproduce the observation sequence files in the *output* directories, +you would do the following: - Go into your DART prep_bufr observation converter directory and install the PREPBUFR utilities as follows: @@ -835,18 +859,17 @@ necessary for ensemble data assimilation, for large models such as WRF that are run on a supercomputer queueing system, an additional layer of scripts is necessary to glue all of the pieces together. A set of scripts is provided with the tutorial tarball to provide you a starting -point for your own WRF/DART system. You will need to edit these scripts, +point for your own WRF-DART system. You will need to edit these scripts, perhaps extensively, to run them within your particular computing -environment. If you will run on NSF NCAR's Cheyenne environment, fewer edits +environment. If you will run on NSF NCAR's Derecho environment, fewer edits may be needed, but you should familiarize yourself with `running jobs on -Cheyenne `__ +Derecho `__ if necessary. A single forecast/assimilation cycle of this tutorial can -take an hour on Cheyenne - longer if debug options are enabled or the -shared nodes are busy - shorter if more cores or a higher optimization -level is acceptable. +take up to 10 minutes on Derecho - longer if debug options are enabled or +if there is a wait time during the queue submission. In this tutorial, we have previously edited the *param.csh* and other -scripts. Throughout the WRF/DART scripts, there are many options to +scripts. Throughout the WRF-DART scripts, there are many options to adjust cycling frequency, domains, ensemble size, etc., which are available when adapting this set of scripts for your own research.
To become more famililar with this set of scripts and to eventually make @@ -951,7 +974,7 @@ In some cases there could be multiple obs_epoch*.nc files, but in general, the u should use the obs_epoch file appended with the largest numeric value as it contains the most complete set of observations. The diagnostic scripts used here are included within the DART package, and require a license of Matlab to run. The -commands shown below to run the diagnostics use NSF NCAR's Cheyenne, but a user could +commands shown below to run the diagnostics use NSF NCAR's Derecho, but a user could also run on their local machine. First explore the obs_epoch*.nc file and identify the variety of observations included @@ -1340,12 +1363,12 @@ Additional materials from previous in-person tutorials - Introduction - `DART Lab materials <../../../guide/DART_LAB/DART_LAB.html>`__ -- WRF/DART basic building blocks +- WRF-DART basic building blocks -`slides `__ (some material is outdated) - Computing environment support -`slides `__ -- WRF/DART application examples +- WRF-DART application examples -`slides `__ (some material is outdated) - Observation processing diff --git a/models/wrf/tutorial/template/input.nml.template b/models/wrf/tutorial/template/input.nml.template index 5b3fd1dfab..7ce0c39774 100644 --- a/models/wrf/tutorial/template/input.nml.template +++ b/models/wrf/tutorial/template/input.nml.template @@ -1,3 +1,10 @@ +&probit_transform_nml + / + +&algorithm_info_nml + qceff_table_filename = '' + / + &filter_nml async = 2, adv_ens_command = "./advance_model.csh", @@ -65,10 +72,9 @@ &assim_tools_nml - filter_kind = 1, cutoff = 0.10, sort_obs_inc = .false., - spread_restoration = .true., + spread_restoration = .false., sampling_error_correction = .true., print_every_nth_obs = 1000, adaptive_localization_threshold = 2000,