Add a simple parallel write example #21

Merged
5 commits merged on May 22, 2024
56 changes: 56 additions & 0 deletions xios_examples/write_domain_parallel/Makefile
@@ -0,0 +1,56 @@
# Makefile for the parallel write XIOS programme
# Targets provided are detailed below:
#
# all: (default) Build the programme
# clean: Delete all final products and working files
# run: Run the programme
#
# Environment variables expected by this Makefile:
#
# NETCDF_LIBDIR: the directories for the netCDF lib files
# encoded as a -L string, e.g.
# "-L/dir1 -L/dir2"
# NETCDF_INCDIR: the directories for the netCDF include files
# encoded as a -I string, e.g.
# "-I/dir3 -I/dir4"
# (note: the -L/-I encoding is for consistency with the XIOS build
# process required for the CI build machine; it is not needed for the
# other directory variables below, which are plain paths)
#
# XIOS_INCDIR: The directory for XIOS include files
# XIOS_LIBDIR: The directory for XIOS lib files
# XIOS_BINDIR: The directory for XIOS binary files
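#
# Example invocation (paths are illustrative only):
#   NETCDF_INCDIR="-I/path/to/netcdf/include" NETCDF_LIBDIR="-L/path/to/netcdf/lib" \
#   XIOS_INCDIR=/path/to/xios/inc XIOS_LIBDIR=/path/to/xios/lib \
#   XIOS_BINDIR=/path/to/xios/bin make all run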

FCFLAGS = -g

FC = mpif90 # compiler driver for MPI programs

# compiler flag, includes
FCFLAGS += -I$(XIOS_INCDIR) $(NETCDF_INCDIR)

# loader flags
LDFLAGS = \
-L$(XIOS_LIBDIR) \
$(NETCDF_LIBDIR) \
-lxios \
-lnetcdf \
-lnetcdff \
-lstdc++

all: write_parallel

# Fortran compilation
%.o: %.F90
	$(FC) $(FCFLAGS) -c $<

# Fortran linking
write_parallel: write_parallel.o
	$(FC) -o write_parallel.exe write_parallel.o $(LDFLAGS) \
	&& ln -fs $(XIOS_BINDIR)/xios_server.exe .

# Launch as an MPMD job: one client rank (write_parallel.exe) and one XIOS
# server rank; adjust the -n counts to run with more client or server ranks
run:
	mpiexec -n 1 ./write_parallel.exe : -n 1 ./xios_server.exe

# cleanup
clean:
	rm -f *.exe *.o *.mod *.MOD *.out *.err *.nc
17 changes: 17 additions & 0 deletions xios_examples/write_domain_parallel/README
@@ -0,0 +1,17 @@
write_domain_parallel
---------------------

This example demonstrates an XIOS parallel write of a decomposed domain to a single output NetCDF file.
It sets up an unstructured horizontal domain from a nominal 100 x 100 latitude-longitude grid (the pole regions are excluded) with 3 vertical levels, and creates a field of sample data.
Data is written at hourly intervals for 10 timesteps, with the field incremented by 0.2 units between writes.

Unit tests include:

- Checking that the output file exists and is named correctly.
- Checking that the file contains the correct number of timesteps (10).
- Checking that the average value of the field at level 1 in the final
  timestep matches the value expected from the starting value, the number
  of timesteps and the increment (2.8); see the worked value below.
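
The expected value follows directly from the setup: level 1 of the field is
initialised to 1.0 (its level number) and is incremented by 0.2 after each of
the first nine of the ten hourly writes, so the value written at the final
timestep is 1.0 + 9 * 0.2 = 2.8 everywhere on that level, and so is its
average.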



3 changes: 3 additions & 0 deletions xios_examples/write_domain_parallel/__init__.py
@@ -0,0 +1,3 @@
"""
Enable this folder to be a module path, for imports and test discovery.
"""
7 changes: 7 additions & 0 deletions xios_examples/write_domain_parallel/iodef.xml
@@ -0,0 +1,7 @@
<?xml version="1.0"?>
<simulation>
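<!-- iodef.xml is the entry point read by XIOS at initialisation; the src
     attributes below pull in the model context from main.xml and the XIOS
     server configuration from xios.xml -->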

<context id="main" src="main.xml"/>
<context id="xios" src="xios.xml"/>

</simulation>
33 changes: 33 additions & 0 deletions xios_examples/write_domain_parallel/main.xml
@@ -0,0 +1,33 @@
<context>

<calendar type="Gregorian"/>

<field_definition enabled=".TRUE." freq_op="1ts" operation="instant" >
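<!-- operation="instant" with freq_op="1ts" requests instantaneous field
     values each timestep rather than a time-averaged diagnostic -->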
<field id="global_field_1" name="global_field_1" long_name="global_field_1" unit="1" grid_ref="grid_2d" />
</field_definition>



<!-- For the convention attribute, use CF for unstructured NetCDF output and UGRID for UGRID NetCDF output -->
<file_definition type="one_file" output_freq="1ts" par_access="collective" enabled=".TRUE.">
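<!-- type="one_file" with par_access="collective" writes the whole decomposed
     domain into a single shared NetCDF file using collective parallel I/O -->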
<file id="domain_output_1" name="domain_output_1" convention="UGRID">
<field field_ref="global_field_1"/>
</file>
</file_definition>

<axis_definition>
<axis id="vert_axis" name="levels"/>
</axis_definition>

<domain_definition>
<domain id="domain_2d" name="domain_2d" nvertex="4"/>
</domain_definition>

<grid_definition>
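<!-- grid_2d combines the unstructured horizontal domain with the vertical
     axis to describe the full shape of global_field_1 -->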
<grid id="grid_2d">
<domain domain_ref="domain_2d"/>
<axis axis_ref="vert_axis"/>
</grid>
</grid_definition>

</context>
53 changes: 53 additions & 0 deletions xios_examples/write_domain_parallel/test_write_simple.py
@@ -0,0 +1,53 @@
import os
import subprocess
import unittest

import netCDF4
import numpy as np

import xios_examples.shared_testing as xshared

this_path = os.path.realpath(__file__)
this_dir = os.path.dirname(this_path)


class TestParallelWrite(xshared._TestCase):
    test_dir = this_dir
    transient_inputs = []
    transient_outputs = ["domain_output_1.nc"]
    rtol = 5e-04
    executable = './write_parallel.exe'

    def test_parallel_write(self):
        # run the compiled Fortran XIOS programme
        with open('{}/xios.xml'.format(self.test_dir)) as cxml:
            print(cxml.read(), flush=True)
        # run with 2 client ranks and 2 XIOS server ranks so the parallel
        # write path is genuinely exercised
        self.run_mpi_xios(nclients=2, nservers=2)
        outputfile_1 = self.transient_outputs[0]

        # Check the expected output file exists
        runfile_1 = '{}/{}'.format(self.test_dir, outputfile_1)
        self.assertTrue(os.path.exists(runfile_1))

        # Checks for output file

        rootgrp = netCDF4.Dataset(runfile_1, 'r')
        file_1_data = rootgrp['global_field_1']

        # Check the file has 10 timesteps
        self.assertTrue(file_1_data.shape[0] == 10)
        # Check the average value of the field at level 1 in the final timestep
        expected = 2.8
        result = np.average(file_1_data[-1, 0, :])
        diff = result - expected
        # prepare message for failure
        msg = (self.transient_outputs[0] + ': the expected result\n {exp}\n '
               'differs from the actual result\n {res} \n '
               'with diff \n {diff}\n')
        msg = msg.format(exp=expected, res=result, diff=diff)
        self.assertTrue(np.allclose(result, expected, rtol=self.rtol), msg=msg)





207 changes: 207 additions & 0 deletions xios_examples/write_domain_parallel/write_parallel.F90
@@ -0,0 +1,207 @@
!-----------------------------------------------------------------------------
! (C) Crown copyright 2024 Met Office. All rights reserved.
! The file LICENCE, distributed with this code, contains details of the terms
! under which the code may be used.
!-----------------------------------------------------------------------------
!> Set up a 2D unstructured domain of arbitrary data and output in parallel to
!> one NetCDF file at the defined frequency

program write_parallel
use xios
use mpi

implicit none

integer :: comm = -1
integer :: ierr = 0
integer :: size, rank
integer :: n_steps = 10
integer :: ts = 0

double precision,allocatable :: levels(:)
double precision,allocatable :: lon_glo(:)
double precision,allocatable :: lat_glo(:)
double precision,allocatable :: bounds_lon_glo(:,:)
double precision,allocatable :: bounds_lat_glo(:,:)
double precision,allocatable :: field_1_glo(:,:)
double precision,allocatable :: lon_lo(:)
double precision,allocatable :: lat_lo(:)
double precision,allocatable :: bounds_lon_lo(:,:)
double precision,allocatable :: bounds_lat_lo(:,:)
double precision,allocatable :: field_1_lo(:,:)

call initialise()
call simulate()
call finalise()
contains

subroutine initialise()

type(xios_date) :: origin
type(xios_date) :: start
type(xios_duration) :: tstep
integer :: mpi_error
character(len=*),parameter :: id="client"

integer :: nlon = 100
integer :: nlat = 100
integer :: nlevs = 3
integer :: ncell
integer :: ilat, ilon, ilev, ind
integer :: ni, ibegin
double precision :: lon1, lon2, lat1, lat2

! Initialise MPI and XIOS
call MPI_INIT(ierr)
call xios_initialize(id,return_comm=comm)


!------------------------Set up Vertical levels-------------------!

allocate(levels(nlevs)) ; levels=(/(ilev,ilev=1,nlevs)/)

!------------------------Set up Global Horizontal domain-------------------!

! Regions around the poles are not included into the grid
! The whole grid is rectangular (nvertex=4)

ncell = nlon * (nlat-1)
allocate(lon_glo(ncell))
allocate(lat_glo(ncell))
allocate(bounds_lon_glo(4,ncell))
allocate(bounds_lat_glo(4,ncell))


allocate(field_1_glo(ncell,nlevs))

ind = 0
do ilat = 1, nlat-1
do ilon = 1, nlon

ind=ind+1

lon1 = 360./dble(nlon) * dble(ilon-1)
lon2 = lon1 + 360./DBLE(nlon)

lat1 = (90. + 90./dble(nlat)) - 180./dble(nlat)*dble(ilat)
lat2 = lat1 - 180./dble(nlat)

lon_glo(ind) = (lon1+lon2)*0.5
lat_glo(ind) = (lat1+lat2)*0.5

bounds_lon_glo(1,ind) = lon1
bounds_lon_glo(2,ind) = lon2
bounds_lon_glo(3,ind) = lon2
bounds_lon_glo(4,ind) = lon1

bounds_lat_glo(1,ind) = lat1
bounds_lat_glo(2,ind) = lat1
bounds_lat_glo(3,ind) = lat2
bounds_lat_glo(4,ind) = lat2

! Set data field arrays for each level to the level number

do ilev = 1, nlevs
field_1_glo(ind,ilev) = dble(ilev)
end do

enddo
enddo



!------------------------Set up Local Partitioned Horizontal domain-------------------!


call MPI_COMM_RANK(comm,rank,ierr)
call MPI_COMM_SIZE(comm,size,ierr)
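
! Distribute the ncell global cells across the MPI ranks: the first
! mod(ncell, size) ranks receive ncell/size + 1 cells and the remaining
! ranks receive ncell/size; ibegin is each rank's zero-based offset into
! the global cell index.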

if (mod(ncell, size) == 0) then
ni = ncell/size
ibegin = rank*ni
else
if (rank < MOD(ncell, size)) then
ni = ncell/size + 1
ibegin = rank*(ncell/size + 1)
else
ni = ncell/size
if (rank == MOD(ncell, size)) then
ibegin = rank*(ncell/size + 1)
else
ibegin = MOD(ncell,size)*(ncell/size + 1) + (rank-MOD(ncell,size))*(ncell/size)
end if
end if
end if

allocate(lon_lo(ni))
allocate(lat_lo(ni))
allocate(bounds_lon_lo(4,ni))
allocate(bounds_lat_lo(4,ni))
allocate(field_1_lo(ni,nlevs))

lon_lo = lon_glo(1+ibegin:ibegin+ni)
lat_lo = lat_glo(1+ibegin:ibegin+ni)
bounds_lon_lo(:,:) = bounds_lon_glo(:,1+ibegin:ibegin+ni)
bounds_lat_lo(:,:) = bounds_lat_glo(:,1+ibegin:ibegin+ni)

field_1_lo(:,:) = field_1_glo(1+ibegin:ibegin+ni,:)


! Arbitrary datetime setup
origin = xios_date(2022, 2, 2, 12, 0, 0)
start = xios_date(2022, 12, 13, 12, 0, 0)
tstep = xios_hour

call xios_context_initialize('main', comm)
call xios_set_time_origin(origin)
call xios_set_start_date(start)
call xios_set_timestep(tstep)


! Declare this rank's slice of the global unstructured domain: ni_glo is the
! global cell count, ibegin and ni give the local offset and size, and the
! lonvalue/bounds arrays hold the local partitioned coordinates
call xios_set_domain_attr("domain_2d", ni_glo=ncell, ibegin=ibegin, ni=ni, type='unstructured')
call xios_set_domain_attr("domain_2d", lonvalue_1d=lon_lo, latvalue_1d=lat_lo)
call xios_set_domain_attr("domain_2d", bounds_lon_1d=bounds_lon_lo, bounds_lat_1d=bounds_lat_lo)

! Define the vertical axis with its level values
call xios_set_axis_attr("vert_axis", n_glo=nlevs, value=levels)

call xios_close_context_definition()

end subroutine initialise

subroutine finalise()

integer :: mpi_error

! Finalise XIOS and MPI
call xios_context_finalize()

call xios_finalize()
call MPI_Finalize(mpi_error)

end subroutine finalise

subroutine simulate()

do ts=1,n_steps

call xios_update_calendar(ts)
! Output field 1 every timestep
call xios_send_field("global_field_1",field_1_lo)
! increment field value
field_1_lo = field_1_lo + 0.2

enddo

! Clean up
deallocate(levels)
deallocate(lon_glo, lat_glo)
deallocate(bounds_lon_glo, bounds_lat_glo)
deallocate(field_1_glo)
deallocate(lon_lo, lat_lo)
deallocate(bounds_lon_lo, bounds_lat_lo)
deallocate(field_1_lo)

end subroutine simulate

end program write_parallel